// Copyright 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#ifdef ADDRESS_SPACE_NAMESPACE
namespace ADDRESS_SPACE_NAMESPACE {
#endif

// This is ported from goldfish_address_space, allowing it to be used for
// general sub-allocations of any buffer range.
// It is also a pure header library, so no compiler tricks are needed to use
// it in a particular implementation. Please don't include it from a file
// that is included everywhere else, though.
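//
// A minimal usage sketch (the sizes and the initial capacity of 32 below are
// arbitrary illustrations, not required values):
//
//   struct address_space_allocator allocator;
//   address_space_allocator_init(&allocator, 16 * 4096, 32);
//
//   uint64_t offset = address_space_allocator_allocate(&allocator, 4096);
//   if (offset != ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET) {
//       /* use [offset, offset + 4096) of the managed range */
//       address_space_allocator_deallocate(&allocator, offset);
//   }
//
//   address_space_allocator_destroy_nocleanup(&allocator);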

/* Represents a contiguous range of addresses and a flag indicating whether
 * this block is available.
 */
struct address_block {
    uint64_t offset;
    union {
        uint64_t size_available; /* VMSTATE_x does not support bit fields */
        struct {
            uint64_t size : 63;
            uint64_t available : 1;
        };
    };
};

/* A dynamic array of address blocks, with the following invariant:
 * blocks[i].size > 0
 * blocks[i+1].offset = blocks[i].offset + blocks[i].size
 */
struct address_space_allocator {
    struct address_block *blocks;
    int size;
    int capacity;
    uint64_t total_bytes;
};
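
/* For illustration (a hypothetical state, not anything this header produces
 * verbatim): a 16384-byte space with a single 4096-byte allocation at
 * offset 4096 would be described by three blocks:
 *
 *   blocks[0] = { offset = 0,    size = 4096, available = 1 }
 *   blocks[1] = { offset = 4096, size = 4096, available = 0 }
 *   blocks[2] = { offset = 8192, size = 8192, available = 1 }
 */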

#define ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET (~(uint64_t)0)
#if defined(__APPLE__) && defined(__arm64__)
#define ANDROID_EMU_ADDRESS_SPACE_DEFAULT_PAGE_SIZE 16384
#else
#define ANDROID_EMU_ADDRESS_SPACE_DEFAULT_PAGE_SIZE 4096
#endif

/* The assert function to abort if something goes wrong. */
static void address_space_assert(bool condition) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC
    ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC(condition);
#else
    assert(condition);
#endif
}

static void* address_space_realloc(void* ptr, size_t size) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC
    return ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC(ptr, size);
#else
    void* res = realloc(ptr, size);
    return res;
#endif
}

static void address_space_free(void* ptr) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC
    ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC(ptr);
#else
    free(ptr);
#endif
}

/* Looks for the smallest available block (to reduce fragmentation) that can
 * fit the requested amount and returns its index, or -1 if none is available.
 */
static int address_space_allocator_find_available_block(
    struct address_block *block,
    int n_blocks,
    uint64_t size_at_least)
{
    int index = -1;
    uint64_t size_at_index = 0;
    int i;

    address_space_assert(n_blocks >= 1);

    for (i = 0; i < n_blocks; ++i, ++block) {
        uint64_t this_size = block->size;
        address_space_assert(this_size > 0);

        if (this_size >= size_at_least && block->available &&
            (index < 0 || this_size < size_at_index)) {
            index = i;
            size_at_index = this_size;
        }
    }

    return index;
}

/* Looks for an available block that contains the given offset and has a size
 * of at least size_at_least bytes; returns its index, or -1 if none is found.
 */
static int address_space_allocator_find_available_block_at_offset(
    struct address_block *block,
    int n_blocks,
    uint64_t size_at_least,
    uint64_t offset)
{
    int index = -1;
    uint64_t size_at_index = 0;
    int i;

    address_space_assert(n_blocks >= 1);

    for (i = 0; i < n_blocks; ++i, ++block) {
        uint64_t this_size = block->size;
        address_space_assert(this_size > 0);

        if (this_size >= size_at_least && block->available &&
            offset >= block->offset &&
            offset < block->offset + this_size) {
            index = i;
            size_at_index = this_size;
            return index;
        }
    }

    return index;
}

static int
address_space_allocator_grow_capacity(int old_capacity) {
    address_space_assert(old_capacity >= 1);

    return old_capacity + old_capacity;
}

/* Inserts one more address block right after the i'th (by borrowing from the
 * i'th block's size) and adjusts sizes:
 * pre:
 *   size < blocks[i].size
 *
 * post:
 *   * might reallocate allocator->blocks if there is no capacity to insert one
 *   * blocks[i].size -= size;
 *   * blocks[i+1].size = size;
 */
static struct address_block*
address_space_allocator_split_block(
    struct address_space_allocator *allocator,
    int i,
    uint64_t size)
{
    address_space_assert(allocator->capacity >= 1);
    address_space_assert(allocator->size >= 1);
    address_space_assert(allocator->size <= allocator->capacity);
    address_space_assert(i >= 0);
    address_space_assert(i < allocator->size);
    address_space_assert(size < allocator->blocks[i].size);

    if (allocator->size == allocator->capacity) {
        int new_capacity = address_space_allocator_grow_capacity(allocator->capacity);
        allocator->blocks =
            (struct address_block*)
            address_space_realloc(
                allocator->blocks,
                sizeof(struct address_block) * new_capacity);
        address_space_assert(allocator->blocks);
        allocator->capacity = new_capacity;
    }

    struct address_block *blocks = allocator->blocks;

    /* size = 5, i = 1
     * [ 0 | 1 |  2  |  3  | 4 ]  =>  [ 0 | 1 | new |  2  |  3  | 4 ]
     *       i  (i+1) (i+2)                 i  (i+1) (i+2)
     */
    memmove(&blocks[i + 2], &blocks[i + 1],
            sizeof(struct address_block) * (allocator->size - i - 1));

    struct address_block *to_borrow_from = &blocks[i];
    struct address_block *new_block = to_borrow_from + 1;

    uint64_t new_size = to_borrow_from->size - size;

    to_borrow_from->size = new_size;

    new_block->offset = to_borrow_from->offset + new_size;
    new_block->size = size;
    new_block->available = 1;

    ++allocator->size;

    return new_block;
}

/* Inserts one more address block right after the i'th (by borrowing from the
 * i'th block's size) and adjusts sizes:
 * pre:
 *   size < blocks[i].size
 *   offset >= blocks[i].offset
 *   offset < blocks[i].offset + blocks[i].size
 *
 * post:
 *   * might reallocate allocator->blocks if there is no capacity to insert one
 *   * blocks[i].size = offset - blocks[i].offset;
 *   * blocks[i+1].offset = offset;
 *   * blocks[i+1].size = size;
 *   * an extra block may be inserted after (i+1) to hold any space left over
 *     at the end of the original block
 */
static struct address_block*
address_space_allocator_split_block_at_offset(
    struct address_space_allocator *allocator,
    int i,
    uint64_t size,
    uint64_t offset)
{
    uint64_t old_block_size;
    bool need_extra_block;
    int new_block_index_offset;

    address_space_assert(allocator->capacity >= 1);
    address_space_assert(allocator->size >= 1);
    address_space_assert(allocator->size <= allocator->capacity);
    address_space_assert(i >= 0);
    address_space_assert(i < allocator->size);
    address_space_assert(size < allocator->blocks[i].size);

    need_extra_block =
        (allocator->blocks[i].size - size - (offset - allocator->blocks[i].offset)) != 0;
    new_block_index_offset = need_extra_block ? 1 : 0;

    if (allocator->size + new_block_index_offset >= allocator->capacity) {
        int new_capacity = address_space_allocator_grow_capacity(allocator->capacity + new_block_index_offset);
        allocator->blocks =
            (struct address_block*)
            address_space_realloc(
                allocator->blocks,
                sizeof(struct address_block) * new_capacity);
        address_space_assert(allocator->blocks);
        allocator->capacity = new_capacity;
    }

    struct address_block *blocks = allocator->blocks;

    /* size = 5, i = 1
     * [ 0 | 1 |  2  |  3  | 4 ]  =>  [ 0 | 1 | new | new (for slop) |  2  |  3  | 4 ]
     *       i  (i+1) (i+2)                 i  (i+1)      (i+2)      (i+3)
     */
    memmove(&blocks[i + 2 + new_block_index_offset], &blocks[i + 1],
            sizeof(struct address_block) * (allocator->size - i - 1));

    struct address_block *to_borrow_from = &blocks[i];
    struct address_block *new_block = to_borrow_from + 1;
    struct address_block *extra_block = to_borrow_from + 2;

    old_block_size = to_borrow_from->size;
    to_borrow_from->size = offset - to_borrow_from->offset;

    new_block->offset = offset;
    new_block->size = size;
    new_block->available = 1;

    ++allocator->size;

    if (need_extra_block) {
        extra_block->offset = offset + size;
        extra_block->size = old_block_size - size - to_borrow_from->size;
        extra_block->available = 1;

        ++allocator->size;
    }

    return new_block;
}
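
/* Worked example (hypothetical values): splitting a single free block
 * { offset = 0, size = 16384 } with size = 4096 at offset = 8192 yields:
 *
 *   blocks[i]     = { offset = 0,     size = 8192, available = 1 }
 *   blocks[i + 1] = { offset = 8192,  size = 4096, available = 1 }  (returned)
 *   blocks[i + 2] = { offset = 12288, size = 4096, available = 1 }  (slop)
 *
 * The caller then marks the returned block as unavailable.
 */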

/* Marks the i'th block as available. If the adjacent blocks ((i-1) and (i+1))
 * are also available, the i'th block is merged with them.
 * pre:
 *   i < allocator->size
 * post:
 *   The i'th block is merged with adjacent available blocks; the blocks that
 *   were merged away are removed and allocator->size is updated accordingly.
 */
static void address_space_allocator_release_block(
    struct address_space_allocator *allocator,
    int i)
{
    struct address_block *blocks = allocator->blocks;
    int before = i - 1;
    int after = i + 1;
    int size = allocator->size;

    address_space_assert(i >= 0);
    address_space_assert(i < size);

    blocks[i].available = 1;

    if (before >= 0 && blocks[before].available) {
        if (after < size && blocks[after].available) {
            // merge (before, i, after) into before
            blocks[before].size += (blocks[i].size + blocks[after].size);

            size -= 2;
            memmove(&blocks[i], &blocks[i + 2],
                    sizeof(struct address_block) * (size - i));
            allocator->size = size;
        } else {
            // merge (before, i) into before
            blocks[before].size += blocks[i].size;

            --size;
            memmove(&blocks[i], &blocks[i + 1],
                    sizeof(struct address_block) * (size - i));
            allocator->size = size;
        }
    } else if (after < size && blocks[after].available) {
        // merge (i, after) into i
        blocks[i].size += blocks[after].size;

        --size;
        memmove(&blocks[after], &blocks[after + 1],
                sizeof(struct address_block) * (size - after));
        allocator->size = size;
    }
}

/* Takes a size to allocate an address block and returns an offset where this
 * block is allocated. This block will not be available to other callers unless
 * it is explicitly deallocated (see address_space_allocator_deallocate below).
 * Returns ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET if no available block is large
 * enough.
 */
static uint64_t address_space_allocator_allocate(
    struct address_space_allocator *allocator,
    uint64_t size)
{
    int i = address_space_allocator_find_available_block(allocator->blocks,
                                                         allocator->size,
                                                         size);
    if (i < 0) {
        return ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET;
    } else {
        address_space_assert(i < allocator->size);

        struct address_block *block = &allocator->blocks[i];
        address_space_assert(block->size >= size);

        if (block->size > size) {
            block = address_space_allocator_split_block(allocator, i, size);
        }

        address_space_assert(block->size == size);
        block->available = 0;

        return block->offset;
    }
}

/* Allocates an address block of the given size at the requested fixed offset.
 * The block will not be available to other callers unless it is explicitly
 * deallocated (see address_space_allocator_deallocate below).
 * Returns 0 on success and -1 if no suitable available block contains the
 * offset.
 */
static int address_space_allocator_allocate_fixed(
    struct address_space_allocator *allocator,
    uint64_t size,
    uint64_t offset)
{
    int i = address_space_allocator_find_available_block_at_offset(
        allocator->blocks,
        allocator->size,
        size,
        offset);

    if (i < 0) {
        return -1;
    } else {
        address_space_assert(i < allocator->size);

        struct address_block *block = &allocator->blocks[i];
        address_space_assert(block->size >= size);

        if (block->size > size) {
            block = address_space_allocator_split_block_at_offset(allocator, i, size, offset);
        }

        address_space_assert(block->size == size);
        block->available = 0;

        return 0;
    }
}

/* Takes an offset returned from address_space_allocator_allocate earlier
 * (see above) and marks this block as available for further allocation.
 * Returns 0 on success and EINVAL if the offset does not correspond to an
 * allocated block.
 */
static uint32_t address_space_allocator_deallocate(
    struct address_space_allocator *allocator,
    uint64_t offset)
{
    struct address_block *block = allocator->blocks;
    int size = allocator->size;
    int i;

    address_space_assert(size >= 1);

    for (i = 0; i < size; ++i, ++block) {
        if (block->offset == offset) {
            if (block->available) {
                return EINVAL;
            } else {
                address_space_allocator_release_block(allocator, i);
                return 0;
            }
        }
    }

    return EINVAL;
}

/* Initializes the allocator with a single available seed block covering
 * [0, size). */
static void address_space_allocator_init(
    struct address_space_allocator *allocator,
    uint64_t size,
    int initial_capacity)
{
    address_space_assert(initial_capacity >= 1);

    allocator->blocks =
        (struct address_block*)
        malloc(sizeof(struct address_block) * initial_capacity);
    address_space_assert(allocator->blocks);
    memset(allocator->blocks, 0, sizeof(struct address_block) * initial_capacity);

    struct address_block *block = allocator->blocks;

    block->offset = 0;
    block->size = size;
    block->available = 1;

    allocator->size = 1;
    allocator->capacity = initial_capacity;
    allocator->total_bytes = size;
}

/* Destroy function for when we don't care what was previously allocated. */
static void address_space_allocator_destroy_nocleanup(
    struct address_space_allocator *allocator)
{
    address_space_free(allocator->blocks);
}

/* Resets the state of the allocator to the initial state without
 * performing any dynamic memory management. */
static void address_space_allocator_reset(
    struct address_space_allocator *allocator)
{
    address_space_assert(allocator->size >= 1);

    allocator->size = 1;

    struct address_block* block = allocator->blocks;
    block->offset = 0;
    block->size = allocator->total_bytes;
    block->available = 1;
}

typedef void (*address_block_iter_func_t)(void* context, struct address_block*);
typedef void (*address_space_allocator_iter_func_t)(void* context, struct address_space_allocator*);

/* Invokes allocator_func on the allocator itself and then block_func on each
 * of its blocks, in order. */
static void address_space_allocator_run(
    struct address_space_allocator *allocator,
    void* context,
    address_space_allocator_iter_func_t allocator_func,
    address_block_iter_func_t block_func)
{
    struct address_block *block = 0;
    int size;
    int i;

    allocator_func(context, allocator);

    block = allocator->blocks;
    size = allocator->size;

    address_space_assert(size >= 1);

    for (i = 0; i < size; ++i, ++block) {
        block_func(context, block);
    }
}
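
/* A minimal sketch of using address_space_allocator_run for a debug dump
 * (dump_allocator/dump_block are illustrative names, not part of this header):
 *
 *   static void dump_allocator(void* context, struct address_space_allocator* a) {
 *       printf("allocator: %d blocks, %" PRIu64 " total bytes\n",
 *              a->size, a->total_bytes);
 *   }
 *
 *   static void dump_block(void* context, struct address_block* b) {
 *       printf("  offset %" PRIu64 " size %" PRIu64 " available %d\n",
 *              b->offset, (uint64_t)b->size, (int)b->available);
 *   }
 *
 *   address_space_allocator_run(&allocator, NULL, &dump_allocator, &dump_block);
 */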

#ifdef ADDRESS_SPACE_NAMESPACE
}
#endif