1 // Copyright 2018 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #pragma once
15
#include "android/utils/compiler.h"

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
22
23 ANDROID_BEGIN_HEADER
24
25 // This is ported from goldfish_address_space, allowing it to be used for
26 // general sub-allocations of any buffer range.
27 // It is also a pure header library, so there are no compiler tricks needed
28 // to use this in a particular implementation. please don't include this
29 // in a file that is included everywhere else, though.
30
/* Represents a continuous range of addresses and a flag if this block is
 * available
 */
struct address_block {
    uint64_t offset;        /* start of the range, in bytes, from the base of
                             * the managed region */
    uint64_t size : 63;     /* length of the range in bytes; invariant: > 0 */
    uint64_t available : 1; /* 1 if free for allocation, 0 if in use */
};
39
/* A dynamic array of address blocks, with the following invariant:
 * blocks[i].size > 0
 * blocks[i+1].offset = blocks[i].offset + blocks[i].size
 * i.e. the blocks are sorted by offset, contiguous, and tile the whole
 * managed range exactly.
 */
struct address_space_allocator {
    struct address_block *blocks; /* heap array of size |capacity| */
    int size;                     /* number of blocks currently in use */
    int capacity;                 /* allocated length of |blocks|; >= size */
    uint64_t total_bytes;         /* size of the whole managed range; used by
                                   * address_space_allocator_reset */
};
50
51 #define ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET (~(uint64_t)0)
52 #define ANDROID_EMU_ADDRESS_SPACE_DEFAULT_PAGE_SIZE 4096
53
54 /* The assert function to abort if something goes wrong. */
address_space_assert(bool condition)55 static void address_space_assert(bool condition) {
56 #ifdef ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC
57 ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC(condition);
58 #else
59 assert(condition);
60 #endif
61 }
62
/* Allocates |size| bytes of zero-initialized memory, or NULL on failure.
 * Overridable by defining ANDROID_EMU_ADDRESS_SPACE_MALLOC0_FUNC.
 * Marked static: this is a header-only library, so a non-static definition
 * would violate the one-definition rule when the header is included from
 * more than one translation unit.
 */
static void* address_space_malloc0(size_t size) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_MALLOC0_FUNC
    return ANDROID_EMU_ADDRESS_SPACE_MALLOC0_FUNC(size);
#else
    /* calloc zero-initializes in one step and, unlike the previous
     * malloc+memset, does not invoke UB (memset on NULL) when the
     * allocation fails. Callers assert on the result. */
    return calloc(1, size);
#endif
}
72
/* Resizes |ptr| (which may be NULL) to |size| bytes; returns the new pointer
 * or NULL on failure. Overridable by defining
 * ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC.
 * Marked static for the same header-only one-definition-rule reason as the
 * other helpers in this file.
 */
static void* address_space_realloc(void* ptr, size_t size) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC
    return ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC(ptr, size);
#else
    return realloc(ptr, size);
#endif
}
81
/* Releases memory obtained from address_space_malloc0/address_space_realloc.
 * Safe to call with NULL. Overridable by defining
 * ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC.
 * Marked static for the header-only one-definition-rule reason. The override
 * branch no longer uses `return <expr>;` — returning a void expression from
 * a void function is a C constraint violation (it is only legal in C++).
 */
static void address_space_free(void* ptr) {
#ifdef ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC
    ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC(ptr);
#else
    free(ptr);
#endif
}
89
90 /* Looks for the smallest (to reduce fragmentation) available block with size to
91 * fit the requested amount and returns its index or -1 if none is available.
92 */
address_space_allocator_find_available_block(struct address_block * block,int n_blocks,uint64_t size_at_least)93 static int address_space_allocator_find_available_block(
94 struct address_block *block,
95 int n_blocks,
96 uint64_t size_at_least)
97 {
98 int index = -1;
99 uint64_t size_at_index = 0;
100 int i;
101
102 address_space_assert(n_blocks >= 1);
103
104 for (i = 0; i < n_blocks; ++i, ++block) {
105 uint64_t this_size = block->size;
106 address_space_assert(this_size > 0);
107
108 if (this_size >= size_at_least && block->available &&
109 (index < 0 || this_size < size_at_index)) {
110 index = i;
111 size_at_index = this_size;
112 }
113 }
114
115 return index;
116 }
117
/* Returns the capacity to grow the block array to when it is full.
 * Doubling keeps insertions amortized O(1). */
static int
address_space_allocator_grow_capacity(int old_capacity) {
    address_space_assert(old_capacity >= 1);

    return 2 * old_capacity;
}
124
/* Inserts one more address block right after i'th (by borrowing i'th size) and
 * adjusts sizes:
 * pre:
 *   size < blocks[i].size   (enforced by the assert below; the original
 *   comment said `size > blocks[i].size`, which contradicted the code)
 *
 * post:
 *   * might reallocate allocator->blocks if there is no capacity to insert one
 *   * blocks[i].size -= size;
 *   * blocks[i+1].size = size;   (the new block; returned to the caller,
 *     marked available)
 */
static struct address_block*
address_space_allocator_split_block(
    struct address_space_allocator *allocator,
    int i,
    uint64_t size)
{
    address_space_assert(allocator->capacity >= 1);
    address_space_assert(allocator->size >= 1);
    address_space_assert(allocator->size <= allocator->capacity);
    address_space_assert(i >= 0);
    address_space_assert(i < allocator->size);
    address_space_assert(size < allocator->blocks[i].size);

    /* Grow the backing array first so the insertion below always has room.
     * NOTE(review): the realloc result overwrites the old pointer before the
     * NULL check; on failure the assert aborts, so the original array only
     * "leaks" on that aborting path. */
    if (allocator->size == allocator->capacity) {
        int new_capacity = address_space_allocator_grow_capacity(allocator->capacity);
        allocator->blocks =
            (struct address_block*)
            address_space_realloc(
                allocator->blocks,
                sizeof(struct address_block) * new_capacity);
        address_space_assert(allocator->blocks);
        allocator->capacity = new_capacity;
    }

    struct address_block *blocks = allocator->blocks;

    /* Shift the tail right by one slot to open a hole at index (i+1).
     * size = 5, i = 1
     * [ 0 | 1 | 2 | 3 | 4 ] => [ 0 | 1 | new | 2 | 3 | 4 ]
     *   i  (i+1) (i+2)           i  (i+1)  (i+2)
     */
    memmove(&blocks[i + 2], &blocks[i + 1],
            sizeof(struct address_block) * (allocator->size - i - 1));

    struct address_block *to_borrow_from = &blocks[i];
    struct address_block *new_block = to_borrow_from + 1;

    /* The new block takes the last |size| bytes of blocks[i]. */
    uint64_t new_size = to_borrow_from->size - size;

    to_borrow_from->size = new_size;

    new_block->offset = to_borrow_from->offset + new_size;
    new_block->size = size;
    new_block->available = 1;

    ++allocator->size;

    return new_block;
}
183
/* Marks i'th block as available. If adjacent ((i-1) and (i+1)) blocks are also
 * available, it merges i'th block with them.
 * pre:
 *   i < allocator->size
 * post:
 *   i'th block is merged with adjacent ones if they are available, blocks that
 *   were merged from are removed. allocator->size is updated if blocks were
 *   removed. This preserves the invariant that the block list is contiguous
 *   and that no two adjacent blocks are both available.
 */
static void address_space_allocator_release_block(
    struct address_space_allocator *allocator,
    int i)
{
    struct address_block *blocks = allocator->blocks;
    int before = i - 1;
    int after = i + 1;
    int size = allocator->size;

    address_space_assert(i >= 0);
    address_space_assert(i < size);

    blocks[i].available = 1;

    if (before >= 0 && blocks[before].available) {
        if (after < size && blocks[after].available) {
            // merge (before, i, after) into before
            blocks[before].size += (blocks[i].size + blocks[after].size);

            // drop blocks i and i+1 by shifting the tail left two slots
            size -= 2;
            memmove(&blocks[i], &blocks[i + 2],
                    sizeof(struct address_block) * (size - i));
            allocator->size = size;
        } else {
            // merge (before, i) into before
            blocks[before].size += blocks[i].size;

            // drop block i by shifting the tail left one slot
            --size;
            memmove(&blocks[i], &blocks[i + 1],
                    sizeof(struct address_block) * (size - i));
            allocator->size = size;
        }
    } else if (after < size && blocks[after].available) {
        // merge (i, after) into i
        blocks[i].size += blocks[after].size;

        // drop block i+1 by shifting the tail left one slot
        --size;
        memmove(&blocks[after], &blocks[after + 1],
                sizeof(struct address_block) * (size - after));
        allocator->size = size;
    }

}
236
237 /* Takes a size to allocate an address block and returns an offset where this
238 * block is allocated. This block will not be available for other callers unless
239 * it is explicitly deallocated (see address_space_allocator_deallocate below).
240 */
address_space_allocator_allocate(struct address_space_allocator * allocator,uint64_t size)241 static uint64_t address_space_allocator_allocate(
242 struct address_space_allocator *allocator,
243 uint64_t size)
244 {
245 int i = address_space_allocator_find_available_block(allocator->blocks,
246 allocator->size,
247 size);
248 if (i < 0) {
249 return ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET;
250 } else {
251 address_space_assert(i < allocator->size);
252
253 struct address_block *block = &allocator->blocks[i];
254 address_space_assert(block->size >= size);
255
256 if (block->size > size) {
257 block = address_space_allocator_split_block(allocator, i, size);
258 }
259
260 address_space_assert(block->size == size);
261 block->available = 0;
262
263 return block->offset;
264 }
265 }
266
267 /* Takes an offset returned from address_space_allocator_allocate ealier
268 * (see above) and marks this block as available for further allocation.
269 */
address_space_allocator_deallocate(struct address_space_allocator * allocator,uint64_t offset)270 static uint32_t address_space_allocator_deallocate(
271 struct address_space_allocator *allocator,
272 uint64_t offset)
273 {
274 struct address_block *block = allocator->blocks;
275 int size = allocator->size;
276 int i;
277
278 address_space_assert(size >= 1);
279
280 for (i = 0; i < size; ++i, ++block) {
281 if (block->offset == offset) {
282 if (block->available) {
283 return EINVAL;
284 } else {
285 address_space_allocator_release_block(allocator, i);
286 return 0;
287 }
288 }
289 }
290
291 return EINVAL;
292 }
293
294 /* Creates a seed block. */
address_space_allocator_init(struct address_space_allocator * allocator,uint64_t size,int initial_capacity)295 static void address_space_allocator_init(
296 struct address_space_allocator *allocator,
297 uint64_t size,
298 int initial_capacity)
299 {
300 address_space_assert(initial_capacity >= 1);
301
302 allocator->blocks =
303 (struct address_block*)
304 address_space_malloc0(sizeof(struct address_block) * initial_capacity);
305 address_space_assert(allocator->blocks);
306
307 struct address_block *block = allocator->blocks;
308
309 block->offset = 0;
310 block->size = size;
311 block->available = 1;
312
313 allocator->size = 1;
314 allocator->capacity = initial_capacity;
315 allocator->total_bytes = size;
316 }
317
318 /* At this point there should be no used blocks and all available blocks must
319 * have been merged into one block.
320 */
address_space_allocator_destroy(struct address_space_allocator * allocator)321 static void address_space_allocator_destroy(
322 struct address_space_allocator *allocator)
323 {
324 address_space_assert(allocator->size == 1);
325 address_space_assert(allocator->capacity >= allocator->size);
326 address_space_assert(allocator->blocks[0].available);
327 address_space_free(allocator->blocks);
328 }
329
330 /* Resets the state of the allocator to the initial state without
331 * performing any dynamic memory management. */
address_space_allocator_reset(struct address_space_allocator * allocator)332 static void address_space_allocator_reset(
333 struct address_space_allocator *allocator)
334 {
335 address_space_assert(allocator->size >= 1);
336
337 allocator->size = 1;
338
339 struct address_block* block = allocator->blocks;
340 block->offset = 0;
341 block->size = allocator->total_bytes;
342 block->available = 1;
343 }
344
345 ANDROID_END_HEADER
346