/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <inttypes.h>

#include "util/u_math.h"
#include "util/vma.h"

struct util_vma_hole {
   struct list_head link;
   uint64_t offset;
   uint64_t size;
};

#define util_vma_foreach_hole(_hole, _heap) \
   list_for_each_entry(struct util_vma_hole, _hole, &(_heap)->holes, link)

#define util_vma_foreach_hole_safe(_hole, _heap) \
   list_for_each_entry_safe(struct util_vma_hole, _hole, &(_heap)->holes, link)

#define util_vma_foreach_hole_safe_rev(_hole, _heap) \
   list_for_each_entry_safe_rev(struct util_vma_hole, _hole, &(_heap)->holes, link)

void
util_vma_heap_init(struct util_vma_heap *heap,
                   uint64_t start, uint64_t size)
{
   list_inithead(&heap->holes);
   util_vma_heap_free(heap, start, size);

   /* Default to using high addresses */
   heap->alloc_high = true;
}
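
/* A minimal usage sketch for this heap API (the range, sizes, and alignment
 * below are illustrative assumptions, not values taken from this file):
 *
 *    struct util_vma_heap heap;
 *    util_vma_heap_init(&heap, 4096, 4ull * 1024 * 1024 * 1024);
 *
 *    uint64_t addr = util_vma_heap_alloc(&heap, 65536, 4096);
 *    if (addr != 0) {
 *       ... use the [addr, addr + 65536) range ...
 *       util_vma_heap_free(&heap, addr, 65536);
 *    }
 *
 *    util_vma_heap_finish(&heap);
 */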

void
util_vma_heap_finish(struct util_vma_heap *heap)
{
   util_vma_foreach_hole_safe(hole, heap)
      free(hole);
}

#ifndef NDEBUG
static void
util_vma_heap_validate(struct util_vma_heap *heap)
{
   uint64_t prev_offset = 0;
   util_vma_foreach_hole(hole, heap) {
      assert(hole->offset > 0);
      assert(hole->size > 0);

      if (&hole->link == heap->holes.next) {
         /* This must be the top-most hole. Assert that, if it overflows, it
          * overflows to 0, i.e. 2^64.
          */
         assert(hole->size + hole->offset == 0 ||
                hole->size + hole->offset > hole->offset);
      } else {
         /* This is not the top-most hole so it must not overflow and, in
          * fact, must be strictly lower than the top-most hole. If
          * hole->size + hole->offset == prev_offset, then we failed to join
          * holes during a util_vma_heap_free.
          */
         assert(hole->size + hole->offset > hole->offset &&
                hole->size + hole->offset < prev_offset);
      }
      prev_offset = hole->offset;
   }
}
#else
#define util_vma_heap_validate(heap)
#endif

static void
util_vma_hole_alloc(struct util_vma_hole *hole,
                    uint64_t offset, uint64_t size)
{
   assert(hole->offset <= offset);
   assert(hole->size >= offset - hole->offset + size);

   if (offset == hole->offset && size == hole->size) {
      /* Just get rid of the hole. */
      list_del(&hole->link);
      free(hole);
      return;
   }

   assert(offset - hole->offset <= hole->size - size);
   uint64_t waste = (hole->size - size) - (offset - hole->offset);
   if (waste == 0) {
      /* We allocated at the top. Shrink the hole down. */
      hole->size -= size;
      return;
   }

   if (offset == hole->offset) {
      /* We allocated at the bottom. Shrink the hole up. */
      hole->offset += size;
      hole->size -= size;
      return;
   }

   /* We allocated in the middle. We need to split the old hole into two
    * holes, one high and one low.
    */
   struct util_vma_hole *high_hole = calloc(1, sizeof(*hole));
   high_hole->offset = offset + size;
   high_hole->size = waste;

   /* Adjust the hole to be the amount of space left at the bottom of the
    * original hole.
    */
   hole->size = offset - hole->offset;

   /* Place the new hole before the old hole so that the list is in order
    * from high to low.
    */
   list_addtail(&high_hole->link, &hole->link);
}
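
/* A concrete illustration of the middle-split case above (the numbers are
 * made up for the example): allocating [0x2000, 0x2500) out of a hole at
 * offset 0x1000 with size 0x3000 leaves the low hole covering [0x1000,
 * 0x2000) with size 0x1000 and a new high hole covering [0x2500, 0x4000)
 * with size 0x1B00, and the high hole is inserted before the low one so the
 * list stays ordered high-to-low.
 */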

uint64_t
util_vma_heap_alloc(struct util_vma_heap *heap,
                    uint64_t size, uint64_t alignment)
{
   /* The caller is expected to reject zero-size allocations */
   assert(size > 0);
   assert(alignment > 0);

   util_vma_heap_validate(heap);

   if (heap->alloc_high) {
      util_vma_foreach_hole_safe(hole, heap) {
         if (size > hole->size)
            continue;

         /* Compute the offset as the highest address where a chunk of the
          * given size can be without going over the top of the hole.
          *
          * This calculation is known to not overflow because we know that
          * hole->size + hole->offset can only overflow to 0 and size > 0.
          */
         uint64_t offset = (hole->size - size) + hole->offset;

         /* Align the offset. We align down and not up because we are
          * allocating from the top of the hole and not the bottom.
          */
         offset = (offset / alignment) * alignment;

         if (offset < hole->offset)
            continue;

         util_vma_hole_alloc(hole, offset, size);
         util_vma_heap_validate(heap);
         return offset;
      }
   } else {
      util_vma_foreach_hole_safe_rev(hole, heap) {
         if (size > hole->size)
            continue;

         uint64_t offset = hole->offset;

         /* Align the offset */
         uint64_t misalign = offset % alignment;
         if (misalign) {
            uint64_t pad = alignment - misalign;
            if (pad > hole->size - size)
               continue;

            offset += pad;
         }

         util_vma_hole_alloc(hole, offset, size);
         util_vma_heap_validate(heap);
         return offset;
      }
   }

   /* Failed to allocate */
   return 0;
}
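
/* A sketch of how alloc_high changes placement (the single 1 MiB hole is an
 * assumed example, not taken from this file): with one hole covering
 * [0x100000, 0x200000), util_vma_heap_alloc(heap, 0x1000, 0x1000) returns
 * 0x1FF000 when heap->alloc_high is true (top of the hole, aligned down)
 * and 0x100000 when it is false (bottom of the hole, aligned up).
 */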

bool
util_vma_heap_alloc_addr(struct util_vma_heap *heap,
                         uint64_t offset, uint64_t size)
{
   /* An offset of 0 is reserved for allocation failure. It is not a valid
    * address and cannot be allocated.
    */
   assert(offset > 0);

   /* Allocating something with a size of 0 is also not valid. */
   assert(size > 0);

   /* It's possible for offset + size to wrap around if we touch the top of
    * the 64-bit address space, but we cannot go any higher than 2^64.
    */
   assert(offset + size == 0 || offset + size > offset);

   /* Find the hole if one exists. */
   util_vma_foreach_hole_safe(hole, heap) {
      if (hole->offset > offset)
         continue;

      /* Holes are ordered high-to-low so the first hole we find with
       * hole->offset <= offset is our hole. If it's not big enough to
       * contain the requested range, then the allocation fails.
       */
      assert(hole->offset <= offset);
      if (hole->size < offset - hole->offset + size)
         return false;

      util_vma_hole_alloc(hole, offset, size);
      return true;
   }

   /* We didn't find a suitable hole */
   return false;
}
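
/* A minimal sketch of reserving a fixed range with the function above (the
 * address and size are illustrative assumptions):
 *
 *    if (!util_vma_heap_alloc_addr(&heap, 0x10000, 0x1000)) {
 *       ... the range [0x10000, 0x11000) was already taken or never free ...
 *    }
 */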

void
util_vma_heap_free(struct util_vma_heap *heap,
                   uint64_t offset, uint64_t size)
{
   /* An offset of 0 is reserved for allocation failure. It is not a valid
    * address and cannot be freed.
    */
   assert(offset > 0);

   /* Freeing something with a size of 0 is also not valid. */
   assert(size > 0);

   /* It's possible for offset + size to wrap around if we touch the top of
    * the 64-bit address space, but we cannot go any higher than 2^64.
    */
   assert(offset + size == 0 || offset + size > offset);

   util_vma_heap_validate(heap);

   /* Find immediately higher and lower holes if they exist. */
   struct util_vma_hole *high_hole = NULL, *low_hole = NULL;
   util_vma_foreach_hole(hole, heap) {
      if (hole->offset <= offset) {
         low_hole = hole;
         break;
      }
      high_hole = hole;
   }

   if (high_hole)
      assert(offset + size <= high_hole->offset);
   bool high_adjacent = high_hole && offset + size == high_hole->offset;

   if (low_hole) {
      assert(low_hole->offset + low_hole->size > low_hole->offset);
      assert(low_hole->offset + low_hole->size <= offset);
   }
   bool low_adjacent = low_hole && low_hole->offset + low_hole->size == offset;

   if (low_adjacent && high_adjacent) {
      /* Merge the two holes */
      low_hole->size += size + high_hole->size;
      list_del(&high_hole->link);
      free(high_hole);
   } else if (low_adjacent) {
      /* Merge into the low hole */
      low_hole->size += size;
   } else if (high_adjacent) {
      /* Merge into the high hole */
      high_hole->offset = offset;
      high_hole->size += size;
   } else {
      /* Neither hole is adjacent; make a new one */
      struct util_vma_hole *hole = calloc(1, sizeof(*hole));

      hole->offset = offset;
      hole->size = size;

      /* Add it after the high hole so we maintain high-to-low ordering */
      if (high_hole)
         list_add(&hole->link, &high_hole->link);
      else
         list_add(&hole->link, &heap->holes);
   }

   util_vma_heap_validate(heap);
}
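
/* A worked example of the merge logic above (the ranges are made-up
 * assumptions): with free holes [0x1000, 0x2000) and [0x3000, 0x4000),
 * freeing [0x2000, 0x3000) is adjacent to both, so the low hole grows to
 * cover [0x1000, 0x4000) and the high hole is deleted, leaving the single
 * joined hole that util_vma_heap_validate expects.
 */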

void
util_vma_heap_print(struct util_vma_heap *heap, FILE *fp,
                    const char *tab, uint64_t total_size)
{
   fprintf(fp, "%sutil_vma_heap:\n", tab);

   uint64_t total_free = 0;
   util_vma_foreach_hole(hole, heap) {
      fprintf(fp, "%s hole: offset = %"PRIu64" (0x%"PRIx64"), "
                  "size = %"PRIu64" (0x%"PRIx64")\n",
              tab, hole->offset, hole->offset, hole->size, hole->size);
      total_free += hole->size;
   }
   assert(total_free <= total_size);
   fprintf(fp, "%s%"PRIu64"B (0x%"PRIx64") free (%.2f%% full)\n",
           tab, total_free, total_free,
           ((double)(total_size - total_free) / (double)total_size) * 100);
}