/*
 * Copyright (c) 2015, Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <err.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <lib/mmutest/mmutest.h>
#include <lib/unittest/unittest.h>
#include <lk/init.h>
#include <pow2.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * The declarations below are made to avoid issues with CFI when copying a
 * heap-allocated function; this reduces the probability of breakage with
 * future toolchain versions.
 */
extern uint8_t mmutest_arch_nop[];
extern uint8_t mmutest_arch_nop_end[];

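/*
 * Helper that runs func(arg) in a new kernel thread with exit-on-panic set.
 * A canary page filled with 0x55 is mapped below the thread's stack so that
 * corruption of memory beneath the stack can be detected after the thread
 * exits. Returns the thread's exit code, or an error from thread
 * creation/join.
 */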
static int mmutest_run_in_thread(const char* thread_name,
                                 int (*func)(void* arg),
                                 void* arg) {
    int ret;
    int thread_ret;
    struct thread* thread;
    uint8_t* canary;
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();

    thread = thread_create("mmu_test_execute", func, arg, DEFAULT_PRIORITY,
                           DEFAULT_STACK_SIZE);
    if (!thread) {
        return ERR_NO_MEMORY;
    }

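    /*
     * Map a canary page two pages below the thread's stack and fill it with
     * a known pattern; it is checked after the thread exits to catch writes
     * below the stack. If this specific mapping fails, the canary check is
     * simply skipped.
     */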
    canary = (uint8_t*)thread->stack - PAGE_SIZE * 2;

    ret = vmm_alloc(aspace, "canary", PAGE_SIZE, (void**)&canary, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        canary = NULL;
    } else {
        memset(canary, 0x55, PAGE_SIZE);
    }

    thread_set_flag_exit_on_panic(thread, true);
    ret = thread_resume(thread);
    if (ret) {
        return ret;
    }

    ret = thread_join(thread, &thread_ret, INFINITE_TIME);
    if (ret) {
        return ret;
    }

    if (canary) {
        size_t i;
        for (i = 0; i < PAGE_SIZE; i++) {
            if (canary[i] != 0x55)
                break;
        }
        EXPECT_EQ(i, PAGE_SIZE, "memory below stack corrupted\n");

        vmm_free_region(aspace, (vaddr_t)canary);
    }

    return thread_ret;
}

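/*
 * Allocate one page of contiguous memory with the requested arch_mmu_flags
 * and verify via arch_mmu_query that the mapping actually carries those
 * flags.
 */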
static int mmutest_alloc(void** ptrp, uint arch_mmu_flags) {
    int ret;
    uint arch_mmu_flags_query = ~0U;
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();

    ret = vmm_alloc_contiguous(aspace, "mmutest", PAGE_SIZE, ptrp, 0, 0,
                               arch_mmu_flags);

    EXPECT_EQ(NO_ERROR, ret, "vmm_alloc_contiguous failed\n");
    if (ret) {
        return ret;
    }

    arch_mmu_query(&aspace->arch_aspace, (vaddr_t)*ptrp, NULL,
                   &arch_mmu_flags_query);
    EXPECT_EQ(arch_mmu_flags_query, arch_mmu_flags,
              "arch_mmu_query, 0x%x, does not match requested flags, 0x%x\n",
              arch_mmu_flags_query, arch_mmu_flags);
    return 0;
}

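/*
 * Map a page with the given arch_mmu_flags and attempt a 32-bit store to it,
 * either as a kernel access or as a user access (mmutest_arch_store_uint32),
 * then unmap it. Returns the result of the store attempt.
 */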
static int mmutest_vmm_store_uint32(uint arch_mmu_flags, bool user) {
    int ret;
    void* ptr;

    ret = mmutest_alloc(&ptr, arch_mmu_flags);
    if (ret) {
        return ret;
    }

    ret = mmutest_arch_store_uint32(ptr, user);

    vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)ptr);
    return ret;
}

static int mmutest_vmm_store_uint32_kernel(uint arch_mmu_flags) {
    return mmutest_vmm_store_uint32(arch_mmu_flags, false);
}

static int mmutest_vmm_store_uint32_user(uint arch_mmu_flags) {
    return mmutest_vmm_store_uint32(arch_mmu_flags, true);
}

/*
 * Disable cfi-icall as a workaround to avoid CFI check failures when
 * calling heap-allocated functions.
 */
static int mmu_test_execute_thread_func(void* arg)
        __attribute__((no_sanitize("cfi-icall"))) {
    void (*func)(void) = arg;
    func();
    return 0;
}

/*
 * Executes 'mmutest_arch_nop' code from memory mapped with the passed flags.
 * To simplify test writing, this first creates a writable allocation and vmm
 * mapping before making a second mapping with the requested arch_mmu_flags and
 * executing the test thread.  This avoids violating W^X semantics which are
 * enforced on some architectures.
 */
static int mmu_test_execute(uint arch_mmu_flags) {
    const size_t len = mmutest_arch_nop_end - mmutest_arch_nop;
    const size_t alloc_len = round_up(len, PAGE_SIZE);
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();
    struct obj_ref vmm_obj_ref = OBJ_REF_INITIAL_VALUE(vmm_obj_ref);
    struct vmm_obj* vmm_obj = NULL;
    void *ptr = NULL, *execute_ptr = NULL;
    uint arch_mmu_flags_query;
    int ret;

    /* Allocate pages to hold the test code and create writable mapping */
    ret = pmm_alloc(&vmm_obj, &vmm_obj_ref, alloc_len / PAGE_SIZE,
                    PMM_ALLOC_FLAG_CONTIGUOUS, 0);
    ASSERT_EQ(NO_ERROR, ret, "pmm_alloc failed\n");

    ret = vmm_alloc_obj(aspace, "mmutest_w", vmm_obj, 0, alloc_len, &ptr, 0, 0,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc_obj failed\n");

    /* Populate the memory */
    memcpy(ptr, mmutest_arch_nop, len);
    arch_sync_cache_range((addr_t)ptr, len);

    /* Now create a new mapping with the desired test arch_mmu_flags */
    ret = vmm_alloc_obj(aspace, "mmutest_flags", vmm_obj, 0, alloc_len,
                        &execute_ptr, 0, 0, arch_mmu_flags);
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc_obj failed\n");

    /* Ensure the new mapping reflects the initialised memory */
    EXPECT_EQ(0, memcmp(ptr, execute_ptr, alloc_len),
              "mapping contents mismatch\n");

    /* Double check the flags are as expected on the new memory */
    arch_mmu_query(&aspace->arch_aspace, (vaddr_t)execute_ptr, NULL,
                   &arch_mmu_flags_query);
    ASSERT_EQ(arch_mmu_flags_query, arch_mmu_flags,
              "arch_mmu_query, 0x%x, does not match requested flags, 0x%x\n",
              arch_mmu_flags_query, arch_mmu_flags);

    /* Execute the test */
    ret = mmutest_run_in_thread("mmu_test_execute",
                                mmu_test_execute_thread_func, execute_ptr);

test_abort:
    if (execute_ptr) {
        int tmp_ret = vmm_free_region(aspace, (vaddr_t)execute_ptr);
        EXPECT_EQ(NO_ERROR, tmp_ret, "vmm_free_region failed\n");
    }

    if (ptr) {
        int tmp_ret = vmm_free_region(aspace, (vaddr_t)ptr);
        EXPECT_EQ(NO_ERROR, tmp_ret, "vmm_free_region failed\n");
    }

    if (vmm_obj) {
        vmm_obj_del_ref(vmm_obj, &vmm_obj_ref);
    }

    return ret;
}

/* Skip kernel permission tests on ARM as it uses 1MB mappings */
#if ARCH_ARM
#define DISABLED_ON_ARM_NAME(name) DISABLED_##name
#else
#define DISABLED_ON_ARM_NAME(name) name
#endif

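/*
 * Fixture for the vmm_alloc smoke tests below; parameterized over the
 * allocation size and over user vs. kernel address space (see
 * INSTANTIATE_TEST_SUITE_P at the end of the suite).
 */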
typedef struct {
    vmm_aspace_t* aspace;
    size_t allocation_size;
} mmutestvmm_t;

TEST_F_SETUP(mmutestvmm) {
    int ret;
    const void* const* params = GetParam();
    const size_t* allocation_size_p = params[0];
    const bool* is_kernel_aspace = params[1];

    _state->allocation_size = *allocation_size_p;
    if (*is_kernel_aspace) {
        _state->aspace = vmm_get_kernel_aspace();
    } else {
        ret = vmm_create_aspace(&_state->aspace, "mmutestvmm", 0);
        ASSERT_EQ(NO_ERROR, ret);
    }

    ASSERT_GE(_state->allocation_size, PAGE_SIZE);
    ASSERT_LT(_state->allocation_size, _state->aspace->size);
test_abort:;
}

static size_t mmutestvmm_allocation_sizes[] = {
        PAGE_SIZE,
        2 * 1024 * 1024, /* large enough to use section/block mapping on arm */
};

TEST_F_TEARDOWN(mmutestvmm) {
    if (!(_state->aspace->flags & VMM_ASPACE_FLAG_KERNEL)) {
        vmm_free_aspace(_state->aspace);
    }
}

/* Smoke test for vmm_alloc */
TEST_P(mmutestvmm, vmm_alloc) {
    int ret;
    void* ptr = NULL;
    ret = vmm_alloc(_state->aspace, "mmutest", _state->allocation_size, &ptr, 0,
                    0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    EXPECT_EQ(NO_ERROR, ret);
    EXPECT_NE(NULL, ptr);
    ret = vmm_free_region(_state->aspace, (vaddr_t)ptr);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");
}

/* Smoke test for vmm_alloc_contiguous */
TEST_P(mmutestvmm, vmm_alloc_contiguous) {
    int ret;
    void* ptr = NULL;
    ret = vmm_alloc_contiguous(_state->aspace, "mmutest",
                               _state->allocation_size, &ptr,
                               log2_uint(_state->allocation_size), 0,
                               ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    EXPECT_EQ(NO_ERROR, ret);
    EXPECT_NE(NULL, ptr);
    ret = vmm_free_region(_state->aspace, (vaddr_t)ptr);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");
}

INSTANTIATE_TEST_SUITE_P(
        allocationsize,
        mmutestvmm,
        testing_Combine(testing_ValuesIn(mmutestvmm_allocation_sizes),
                        /* user(false) and kernel(true) aspaces */
                        testing_Bool()));

static int mmutest_panic_thread_func(void* _unused) {
    panic("mmutest-panic");
}

TEST(mmutest, panic) {
    /* Check thread_set_flag_exit_on_panic feature needed by other tests */
    int ret = mmutest_run_in_thread("mmutest-panic", mmutest_panic_thread_func,
                                    NULL);
    EXPECT_EQ(ERR_FAULT, ret);
}

static int mmutest_panic_thread_lock_thread_func(void* _unused) {
    THREAD_LOCK(state);
    panic("mmutest-panic-thread-lock");
}

TEST(mmutest, panic_thread_lock) {
    /*
     * Test panic with the thread lock held. Both _panic and platform_halt
     * lock the thread_lock, so _panic needs to release it if it was already
     * held by the current CPU.
     */
    int ret =
            mmutest_run_in_thread("mmutest-panic-thread-lock",
                                  mmutest_panic_thread_lock_thread_func, NULL);
    EXPECT_EQ(ERR_FAULT, ret);
}

TEST(mmutest, alloc_last_kernel_page) {
    int ret;
    void* ptr1;
    void* ptr2;
    void* ptr3;
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);

    /*
     * Perform allocations at a specific address and at a vmm chosen address
     * with and without the last page allocated. There are different code paths
     * in the vmm allocator where the virtual address can overflow for the
     * region that is being allocated and for regions already allocated.
     */

    /* Allocate last kernel aspace page. */
    ptr1 = (void*)(aspace->base + (aspace->size - PAGE_SIZE));
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    /* TODO: allow this to fail as page could already be in use */
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc failed last page\n");

    /* While the last page is allocated, get an object corresponding to it */
    ret = vmm_get_obj(aspace, (vaddr_t)ptr1, PAGE_SIZE, &slice);
    EXPECT_EQ(NO_ERROR, ret, "vmm_get_obj failed to get last page object");
    /* Check the slice we got back */
    EXPECT_NE(NULL, slice.obj);
    EXPECT_EQ(PAGE_SIZE, slice.size);
    EXPECT_EQ(NO_ERROR, slice.offset);
    vmm_obj_slice_release(&slice);

    /* Allocate page anywhere, while the last page is allocated. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0, 0,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc failed anywhere page\n");

    /* Try to allocate last kernel aspace page again, should fail */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    EXPECT_EQ(ERR_NO_MEMORY, ret, "vmm_alloc last page\n");

    /* Allocate 2nd last kernel aspace page, while last page is allocated. */
    ptr3 = (void*)(aspace->base + (aspace->size - 2 * PAGE_SIZE));
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr3, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    /* TODO: allow this to fail as page could already be in use */
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc failed 2nd last page\n");

    /* Free allocated pages */
    ret = vmm_free_region(aspace, (vaddr_t)ptr1);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");
    ret = vmm_free_region(aspace, (vaddr_t)ptr2);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");
    ret = vmm_free_region(aspace, (vaddr_t)ptr3);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");

    /* Try to allocate last page without VMM_FLAG_NO_END_GUARD flag */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_OUT_OF_RANGE, ret, "vmm_alloc succeeded unexpectedly\n");

    /* Allocate and free last page */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    /* TODO: allow this to fail as page could be in use */
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc failed last page\n");
    ret = vmm_free_region(aspace, (vaddr_t)ptr1);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");

    /* Allocate and free page anywhere, while last page is free */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0, 0,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret, "vmm_alloc failed anywhere page\n");
    ret = vmm_free_region(aspace, (vaddr_t)ptr2);
    EXPECT_EQ(NO_ERROR, ret, "vmm_free_region failed\n");

test_abort:;
}

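/*
 * Fixture for the guard page and slice lookup tests below; parameterized
 * over user vs. kernel address space.
 */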
typedef struct {
    vmm_aspace_t* aspace;
} mmutestaspace_t;

TEST_F_SETUP(mmutestaspace) {
    int ret;
    const bool* is_kernel_aspace = GetParam();

    if (*is_kernel_aspace) {
        _state->aspace = vmm_get_kernel_aspace();
    } else {
        ret = vmm_create_aspace(&_state->aspace, "mmutestaspace", 0);
        ASSERT_EQ(NO_ERROR, ret);
    }

test_abort:;
}

TEST_F_TEARDOWN(mmutestaspace) {
    if (!(_state->aspace->flags & VMM_ASPACE_FLAG_KERNEL)) {
        vmm_free_aspace(_state->aspace);
    }
}

TEST_P(mmutestaspace, guard_page) {
    int ret;
    bool retb;
    vmm_aspace_t* aspace = _state->aspace;
    size_t size = PAGE_SIZE * 6;
    vaddr_t base;
    void* ptr1 = NULL;
    void* ptr2 = NULL;
    void* ptr3 = NULL;
    void* ptr4 = NULL;
    void* ptr5 = NULL;
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);

    /* Allocate a page at a random spot with guard pages. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0, 0,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);

    /*
     * We may get an allocation right at the beginning of the address space
     * by chance or because ASLR is disabled. In that case, we make another
     * allocation to ensure that ptr1 - PAGE_SIZE >= aspace->base holds.
     */
    if (aspace->base > (vaddr_t)ptr1 - PAGE_SIZE) {
        ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr3, 0, 0,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        ASSERT_EQ(NO_ERROR, ret);
        ASSERT_GE((vaddr_t)ptr3 - PAGE_SIZE, aspace->base);
        vmm_free_region(aspace, (vaddr_t)ptr1);
        ptr1 = ptr3;
        ptr3 = NULL;
    }

    /* Check that there are no existing adjacent allocations. */
    ret = vmm_get_obj(aspace, (vaddr_t)ptr1 - PAGE_SIZE, PAGE_SIZE, &slice);
    EXPECT_EQ(ERR_NOT_FOUND, ret);
    vmm_obj_slice_release(&slice);

    ret = vmm_get_obj(aspace, (vaddr_t)ptr1 + PAGE_SIZE, PAGE_SIZE, &slice);
    EXPECT_EQ(ERR_NOT_FOUND, ret);
    vmm_obj_slice_release(&slice);

    /* Check that guard pages cannot be allocated. */
    ptr2 = (void*)((vaddr_t)ptr1 - PAGE_SIZE);
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    ptr2 = (void*)((vaddr_t)ptr1 + PAGE_SIZE);
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    ptr2 = NULL;
    vmm_free_region(aspace, (vaddr_t)ptr1);
    ptr1 = NULL;

    /* Check that we cannot allocate at a random spot without guard page */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_NO_START_GUARD | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_INVALID_ARGS, ret);

    /* Find a range to do more specific tests in. */
    retb = vmm_find_spot(aspace, size, &base);
    ASSERT_EQ(true, retb, "failed to find region for test\n");

    /* Allocate first test page. */
    ptr1 = (void*)base;
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr1, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        /*
         * This allocation can fail if another thread allocated the page after
         * vmm_find_spot returned as that call does not reserve the memory.
         * Set ptr1 to NULL so we don't free memory belonging to someone else.
         */
        ptr1 = NULL;
    }
    ASSERT_EQ(NO_ERROR, ret);

    /* Test adjacent page. Should all fail as ptr1 has guard on both sides. */
    ptr2 = (void*)(base + PAGE_SIZE);

    /* No flags. Should fail as both regions have a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, 0);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No start guard. Should fail as first region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No end guard. Should fail as both regions have a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No guard pages. Should fail as first region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* Allocate page after guard page with no end guard */
    ptr2 = (void*)(base + PAGE_SIZE * 2);
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr2, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        ptr2 = NULL;
    }
    ASSERT_EQ(NO_ERROR, ret);

    /* Test page directly after ptr2 */
    ptr3 = (void*)(base + PAGE_SIZE * 3);

    /* No flags. Should fail as second region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr3, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No end guard. Should fail as second region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr3, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No guard pages. Should succeed as neither region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr3, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        ptr3 = NULL;
    }
    ASSERT_EQ(NO_ERROR, ret);

    /* Test page directly after ptr3 */
    ptr4 = (void*)(base + PAGE_SIZE * 4);

    /* No flags. Should fail as second region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr4, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No end guard. Should fail as second region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr4, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No start guard. Should succeed as neither region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr4, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        ptr4 = NULL;
    }
    ASSERT_EQ(NO_ERROR, ret);

    /*
     * Test page directly after ptr4. All attempts should fail as ptr4 has an
     * end guard. Similar to the test after ptr1, but checks that disabling
     * the start guard does not affect the end guard.
     */
    ptr5 = (void*)(base + PAGE_SIZE * 5);

    /* No flags. Should fail as both regions have a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr5, 0,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No start guard. Should fail as first region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr5, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No end guard. Should fail as both regions have a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr5, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /* No guard pages. Should fail as first region has a guard page. */
    ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr5, 0,
                    VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                            VMM_FLAG_NO_END_GUARD,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);

    /*
     * Clear ptr5 so we don't try to free it. Not strictly needed as the guard
     * page around ptr4 will prevent anyone else from allocating memory at this
     * location, and ptr5 is freed first below, but useful if vmm tracing is
     * enabled as failing vmm_free_region calls should all be for vaddr 0.
     */
    ptr5 = NULL;

test_abort:
    vmm_free_region(aspace, (vaddr_t)ptr5);
    vmm_free_region(aspace, (vaddr_t)ptr4);
    vmm_free_region(aspace, (vaddr_t)ptr3);
    vmm_free_region(aspace, (vaddr_t)ptr2);
    vmm_free_region(aspace, (vaddr_t)ptr1);
}

TEST_P(mmutestaspace, find_slice_no_guard) {
    int ret;
    bool retb;
    vmm_aspace_t* aspace = _state->aspace;
    void* ptr[8];
    size_t num_regions = countof(ptr);
    size_t size = PAGE_SIZE * num_regions;
    vaddr_t base;
    uint vmm_flags = VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD |
                     VMM_FLAG_NO_END_GUARD;
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);

    for (size_t i = 0; i < num_regions; i++) {
        ptr[i] = NULL;
    }

    retb = vmm_find_spot(aspace, size, &base);
    ASSERT_EQ(true, retb, "failed to find region for test\n");

    for (int i = num_regions - 1; i >= 0; --i) {
        ptr[i] = (void*)(base + PAGE_SIZE * i);
        ret = vmm_alloc(aspace, "mmutest", PAGE_SIZE, &ptr[i], 0, vmm_flags,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        if (ret) {
            ptr[i] = NULL;
        }

        if (ptr[i]) {
            /* Test that we can find slice corresponding to allocated page. */
            ret = vmm_get_obj(aspace, (vaddr_t)ptr[i], PAGE_SIZE, &slice);
            ASSERT_EQ(NO_ERROR, ret);
            vmm_obj_slice_release(&slice);
        }
    }

test_abort:
    for (size_t i = 0; i < num_regions; i++) {
        vmm_free_region(aspace, (vaddr_t)ptr[i]);
    }
}

INSTANTIATE_TEST_SUITE_P(aspacetype,
                         mmutestaspace,
                         /* user(false) and kernel(true) aspaces */
                         testing_Bool());

TEST(mmutest, check_stack_guard_page_bad_ptr)
__attribute__((no_sanitize("bounds"))) {
    char data[4];
    void* ptr1 = data;
    void* ptr2 = data - DEFAULT_STACK_SIZE;
    EXPECT_EQ(NO_ERROR, mmutest_arch_store_uint32(ptr1, false));
    EXPECT_EQ(ERR_GENERIC, mmutest_arch_store_uint32(ptr2, false));
}

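/*
 * Thread function that places a DEFAULT_STACK_SIZE buffer on its stack so
 * that the frame extends past the bottom of the thread stack; the store
 * below should then land in the stack guard page and fault.
 */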
static int mmutest_stack_overflow_thread_func(void* arg) {
    char data[DEFAULT_STACK_SIZE] __attribute((uninitialized));
    void* ptr = data;
    mmutest_arch_store_uint32(ptr, false);
    return 0;
}

TEST(mmutest, check_stack_guard_page_stack_overflow) {
    EXPECT_EQ(ERR_FAULT,
              mmutest_run_in_thread("stack-overflow",
                                    mmutest_stack_overflow_thread_func, NULL));
}

static int mmutest_recursive_stack_overflow_thread_func(void* arg) {
    char b;
    if ((vaddr_t)arg == 1) {
        return 0;
    }
    return mmutest_recursive_stack_overflow_thread_func(&b) + 1;
}

TEST(mmutest, check_stack_guard_page_recursive_stack_overflow) {
    EXPECT_EQ(ERR_FAULT,
              mmutest_run_in_thread(
                      "stack-overflow",
                      mmutest_recursive_stack_overflow_thread_func, 0));
}

TEST(mmutest, DISABLED_ON_ARM_NAME(rodata_pnx)) {
    EXPECT_EQ(ERR_FAULT, mmutest_arch_rodata_pnx());
}

TEST(mmutest, DISABLED_ON_ARM_NAME(data_pnx)) {
    EXPECT_EQ(ERR_FAULT, mmutest_arch_data_pnx());
}

TEST(mmutest, DISABLED_ON_ARM_NAME(rodata_ro)) {
    EXPECT_EQ(ERR_FAULT, mmutest_arch_rodata_ro());
}

TEST(mmutest, pan) {
    if (!mmutest_arch_pan_supported()) {
        trusty_unittest_printf("[   INFO   ] PAN is not supported\n");
        GTEST_SKIP();
    }
    EXPECT_EQ(true, mmutest_arch_pan_enabled());
test_abort:;
}

TEST(mmutest, store_kernel) {
    int expected_user_rw_access;
    int expected_user_ro_access;

    if (mmutest_arch_pan_enabled()) {
        expected_user_rw_access = ERR_GENERIC;
        expected_user_ro_access = ERR_GENERIC;
    } else {
        expected_user_rw_access = 0;
        expected_user_ro_access = ERR_FAULT;
    }

    EXPECT_EQ(NO_ERROR,
              mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                              ARCH_MMU_FLAG_PERM_NO_EXECUTE));
    EXPECT_EQ(expected_user_rw_access,
              mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                              ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                              ARCH_MMU_FLAG_PERM_USER));
    EXPECT_EQ(NO_ERROR,
              mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                              ARCH_MMU_FLAG_PERM_NO_EXECUTE));
    EXPECT_EQ(expected_user_rw_access,
              mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                              ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                              ARCH_MMU_FLAG_PERM_USER));
    EXPECT_EQ(ERR_FAULT, mmutest_vmm_store_uint32_kernel(
                                 ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_RO));
    EXPECT_EQ(expected_user_ro_access,
              mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                              ARCH_MMU_FLAG_PERM_RO |
                                              ARCH_MMU_FLAG_PERM_USER));
}

TEST(mmutest, store_user) {
    EXPECT_EQ(ERR_GENERIC,
              mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                            ARCH_MMU_FLAG_PERM_NO_EXECUTE));
    EXPECT_EQ(NO_ERROR,
              mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                            ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                            ARCH_MMU_FLAG_PERM_USER));
    EXPECT_EQ(ERR_GENERIC,
              mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                            ARCH_MMU_FLAG_PERM_NO_EXECUTE));
    EXPECT_EQ(NO_ERROR,
              mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                            ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                            ARCH_MMU_FLAG_PERM_USER));
    EXPECT_EQ(ERR_GENERIC,
              mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                            ARCH_MMU_FLAG_PERM_RO));
    EXPECT_EQ(ERR_FAULT, mmutest_vmm_store_uint32_user(
                                 ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_RO |
                                 ARCH_MMU_FLAG_PERM_USER));
}

/*
 * The current implementation of this test checks that the data is lost when
 * reading back from memory, but allows the store to reach the cache. This is
 * not the only allowed behavior and the emulator does not emulate this
 * behavior, so disable this test for now.
 */
TEST(mmutest, DISABLED_store_ns) {
    EXPECT_EQ(2, mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                                 ARCH_MMU_FLAG_NS));
    EXPECT_EQ(2, mmutest_vmm_store_uint32_kernel(ARCH_MMU_FLAG_CACHED |
                                                 ARCH_MMU_FLAG_NS |
                                                 ARCH_MMU_FLAG_PERM_USER));
    EXPECT_EQ(ERR_GENERIC, mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                                         ARCH_MMU_FLAG_NS));
    EXPECT_EQ(2, mmutest_vmm_store_uint32_user(ARCH_MMU_FLAG_CACHED |
                                               ARCH_MMU_FLAG_NS |
                                               ARCH_MMU_FLAG_PERM_USER));
}

TEST(mmutest, run_x) {
    EXPECT_EQ(NO_ERROR, mmu_test_execute(ARCH_MMU_FLAG_PERM_RO));
}

#if ARCH_ARM64
#include <arch/arm64/sregs.h>

TEST(mmutest, run_wx) {
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();
    struct obj_ref vmm_obj_ref = OBJ_REF_INITIAL_VALUE(vmm_obj_ref);
    struct vmm_obj* vmm_obj = NULL;
    void* ptr = NULL;
    int ret;

    /* Allocate a single page */
    ret = pmm_alloc(&vmm_obj, &vmm_obj_ref, 1, PMM_ALLOC_FLAG_CONTIGUOUS, 0);
    ASSERT_EQ(NO_ERROR, ret, "pmm_alloc failed\n");

    /* Try to map as w+x and check it fails */
    ret = vmm_alloc_obj(aspace, "mmutest_wx", vmm_obj, 0, PAGE_SIZE, &ptr, 0, 0,
                        0);
    EXPECT_EQ(ERR_INVALID_ARGS, ret);

    /*
     * ARM64 should have WXN enabled.
     * This means that any writable page is NX irrespective of the PTE entry.
     */
    EXPECT_EQ(SCTLR_EL1_WXN, ARM64_READ_SYSREG(SCTLR_EL1) & SCTLR_EL1_WXN);

test_abort:
    if (vmm_obj) {
        vmm_obj_del_ref(vmm_obj, &vmm_obj_ref);
    }
}
#else
TEST(mmutest, run_wx) {
    EXPECT_EQ(NO_ERROR, mmu_test_execute(0));
}
#endif

TEST(mmutest, run_nx) {
    EXPECT_EQ(ERR_FAULT, mmu_test_execute(ARCH_MMU_FLAG_PERM_NO_EXECUTE));
}

/*
 * Tests that allocations with conflicting NS bits are not allowed
 * near each other
 */
TEST(mmutest, ns_conflict) {
    int ret;
    void* ptr_ns = NULL;
    void* ptr_s = NULL;
    uint arch_mmu_flags_query, ns_flag;
    vmm_aspace_t* aspace = vmm_get_kernel_aspace();

    /*
     * Allocate a NS page with a 16K alignment to ensure that there
     * is enough room after it in the 1MB section for both the guard page
     * and the S page below.
     */
    ret = vmm_alloc(aspace, "ns_conflict_ns", PAGE_SIZE, &ptr_ns,
                    PAGE_SIZE_SHIFT + 2, 0,
                    ARCH_MMU_FLAG_NS | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret == ERR_NOT_SUPPORTED) {
        GTEST_SKIP();
    }
    EXPECT_EQ(NO_ERROR, ret);

    ret = arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr_ns, NULL,
                         &arch_mmu_flags_query);
    EXPECT_EQ(NO_ERROR, ret);

    ns_flag = arch_mmu_flags_query & ARCH_MMU_FLAG_NS;
    EXPECT_EQ(ARCH_MMU_FLAG_NS, ns_flag);

    /*
     * Allocate an S page just after the previous one (plus the guard page).
     * This should fail on arm32 because the kernel shouldn't let us mix the
     * two kinds.
     */
    ptr_s = (uint8_t*)ptr_ns + 2 * PAGE_SIZE;
    ret = vmm_alloc(aspace, "ns_conflict_s", PAGE_SIZE, &ptr_s, PAGE_SIZE_SHIFT,
                    VMM_FLAG_VALLOC_SPECIFIC, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        ptr_s = NULL;
    } else {
        ret = arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr_s, NULL,
                             &arch_mmu_flags_query);
        if (!ret) {
            ns_flag = arch_mmu_flags_query & ARCH_MMU_FLAG_NS;
            EXPECT_EQ(NO_ERROR, ns_flag);
        }
    }

test_abort:
    if (ptr_ns) {
        vmm_free_region(aspace, (vaddr_t)ptr_ns);
    }
    if (ptr_s) {
        vmm_free_region(aspace, (vaddr_t)ptr_s);
    }
}

/* Test suite for vmm_obj_slice and vmm_get_obj */

typedef struct {
    vmm_aspace_t* aspace;
    vaddr_t spot_a_2_page;
    vaddr_t spot_b_1_page;
    struct vmm_obj_slice slice;
} mmutest_slice_t;

TEST_F_SETUP(mmutest_slice) {
    _state->aspace = vmm_get_kernel_aspace();
    _state->spot_a_2_page = 0;
    _state->spot_b_1_page = 0;
    vmm_obj_slice_init(&_state->slice);
    ASSERT_EQ(vmm_alloc(_state->aspace, "mmutest_slice", 2 * PAGE_SIZE,
                        (void**)&_state->spot_a_2_page, 0, 0,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE),
              NO_ERROR);
    ASSERT_EQ(vmm_alloc(_state->aspace, "mmutest_slice", PAGE_SIZE,
                        (void**)&_state->spot_b_1_page, 0, 0,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE),
              NO_ERROR);
test_abort:;
}

TEST_F_TEARDOWN(mmutest_slice) {
    vmm_obj_slice_release(&_state->slice);
    if (_state->spot_a_2_page) {
        vmm_free_region(_state->aspace, (vaddr_t)_state->spot_a_2_page);
    }

    if (_state->spot_b_1_page) {
        vmm_free_region(_state->aspace, (vaddr_t)_state->spot_b_1_page);
    }
}

/*
 * Simplest use of interface - get the slice for a mapped region,
 * of the whole size
 */
TEST_F(mmutest_slice, simple) {
    ASSERT_EQ(vmm_get_obj(_state->aspace, _state->spot_b_1_page, PAGE_SIZE,
                          &_state->slice),
              NO_ERROR);
    EXPECT_EQ(_state->slice.offset, 0);
    EXPECT_EQ(_state->slice.size, PAGE_SIZE);
test_abort:;
}

/* Validate that we will reject an attempt to span two slices */
TEST_F(mmutest_slice, two_objs) {
    vaddr_t base;
    size_t size;
    vaddr_t spot_a = _state->spot_a_2_page;
    vaddr_t spot_b = _state->spot_b_1_page;

    base = MIN(spot_a, spot_b);
    size = MAX(spot_a, spot_b) - base + PAGE_SIZE;

    /* We should not be able to create a slice spanning both objects */
    EXPECT_EQ(vmm_get_obj(_state->aspace, base, size, &_state->slice),
              ERR_OUT_OF_RANGE);

test_abort:;
}

/* Check we can acquire a subslice of a mapped object */
TEST_F(mmutest_slice, subobj) {
    ASSERT_EQ(vmm_get_obj(_state->aspace, _state->spot_a_2_page + PAGE_SIZE,
                          PAGE_SIZE, &_state->slice),
              NO_ERROR);

    EXPECT_EQ(_state->slice.offset, PAGE_SIZE);
    EXPECT_EQ(_state->slice.size, PAGE_SIZE);

test_abort:;
}

/* Check for rejection when the requested range overflows */
TEST_F(mmutest_slice, overflow) {
    EXPECT_EQ(vmm_get_obj(_state->aspace, _state->spot_a_2_page, SIZE_MAX,
                          &_state->slice),
              ERR_INVALID_ARGS);
}

/* Test suite for PMM */

typedef struct {
    vmm_aspace_t* aspace;
} mmutest_pmm_t;

TEST_F_SETUP(mmutest_pmm) {
    _state->aspace = NULL;
    status_t ret = vmm_create_aspace_with_quota(&_state->aspace, "mmutestpmm",
                                                PAGE_SIZE * 2, 0);
    ASSERT_EQ(NO_ERROR, ret);
test_abort:;
}

TEST_F_TEARDOWN(mmutest_pmm) {
    if (_state->aspace) {
        ASSERT_EQ(NO_ERROR, vmm_free_aspace(_state->aspace));
    }
test_abort:;
}

/*
 * Reserve physical pages and allocate from reserved memory.
 */
TEST_F(mmutest_pmm, reserve) {
    void* ptr = NULL;
    void* ptr_unused_1 = NULL;
    void* ptr_unused_2 = NULL;
    status_t ret;
    struct vmm_aspace* temp_aspace = NULL;
    ret = vmm_alloc(_state->aspace, "test_reserve", PAGE_SIZE * 5002, &ptr, 0,
                    VMM_FLAG_NO_PHYSICAL, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_from_reserved", PAGE_SIZE * 2, &ptr,
                    0, VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);

    while (!vmm_create_aspace_with_quota(&temp_aspace, "temp_aspace",
                                         PAGE_SIZE * 5000, 0)) {
    }
    ptr += PAGE_SIZE * 2;

    ret = vmm_alloc(_state->aspace, "test_failure", PAGE_SIZE * 5000,
                    &ptr_unused_1, 0, 0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);
    ret = vmm_alloc(_state->aspace, "test_success", PAGE_SIZE * 2,
                    &ptr_unused_2, 0, 0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(temp_aspace, "test_from_reserved_success", PAGE_SIZE * 5000,
                    &ptr, 0, VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
test_abort:
    if (temp_aspace)
        vmm_free_aspace(temp_aspace);
}

TEST_F(mmutest_pmm, reserve_contiguous) {
    void* ptr = NULL;
    status_t ret;
    ret = vmm_alloc(_state->aspace, "test_reserve", PAGE_SIZE * 2, &ptr, 0,
                    VMM_FLAG_NO_PHYSICAL, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc_contiguous(_state->aspace, "test_from_reserved_continuous",
                               PAGE_SIZE * 2, &ptr, 0,
                               VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                               ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
test_abort:;
}

TEST_F(mmutest_pmm, reserve_too_small) {
    void* ptr = NULL;
    status_t ret;
    ret = vmm_alloc(_state->aspace, "test_reserve", PAGE_SIZE * 2, &ptr, 0,
                    VMM_FLAG_NO_PHYSICAL, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_from_reserved_too_small",
                    PAGE_SIZE * 3, &ptr, 0,
                    VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);
test_abort:;
}

TEST_F(mmutest_pmm, reserve_outside_region) {
    void* ptr = NULL;
    status_t ret;
    ret = vmm_alloc(_state->aspace, "test_reserve", PAGE_SIZE * 2, &ptr, 0,
                    VMM_FLAG_NO_PHYSICAL, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ptr += PAGE_SIZE;
    ret = vmm_alloc(_state->aspace, "test_from_reserved_outside_region",
                    PAGE_SIZE * 2, &ptr, 0,
                    VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_INVALID_ARGS, ret);
test_abort:;
}

/* Test suite for PMM reserve groups */

typedef struct {
    vmm_aspace_t* aspace;
} mmutest_res_group_t;

TEST_F_SETUP(mmutest_res_group) {
    _state->aspace = NULL;
    status_t ret = vmm_create_aspace_with_quota(&_state->aspace, "mmutestrg",
                                                PAGE_SIZE, 0);
    ASSERT_EQ(NO_ERROR, ret);
test_abort:;
}

TEST_F_TEARDOWN(mmutest_res_group) {
    if (_state->aspace) {
        ASSERT_EQ(NO_ERROR, vmm_free_aspace(_state->aspace));
    }
test_abort:;
}

TEST_F(mmutest_res_group, reserve_group_too_big) {
    void* ptr;
    status_t ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE + 1, &ptr,
                             0, VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);
test_abort:;
}

TEST_F(mmutest_res_group, reserve_group_release_ref) {
    /* Destroying an aspace releases refs on its vmm_objs. */
    status_t slice_init = ERR_INVALID_ARGS;
    void* ptr;
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);
    status_t alloc_ret =
            vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                      VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, alloc_ret);
    slice_init = vmm_get_obj(_state->aspace, (vaddr_t)ptr, PAGE_SIZE, &slice);
    ASSERT_EQ(NO_ERROR, slice_init);
    ASSERT_EQ(NO_ERROR, vmm_free_aspace(_state->aspace));
    _state->aspace = NULL;
    ASSERT_EQ(true, obj_has_only_ref(&slice.obj->obj, &slice.obj_ref));
test_abort:
    if (slice_init == NO_ERROR && obj_has_ref(&slice.obj->obj))
        obj_del_ref(&slice.obj->obj, &slice.obj_ref, NULL);
}

TEST_F(mmutest_res_group, no_physical_inner_obj) {
    void* ptr;
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);
    status_t ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE * 2, &ptr,
                             0, VMM_FLAG_QUOTA | VMM_FLAG_NO_PHYSICAL,
                             ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                    VMM_FLAG_QUOTA | VMM_FLAG_VALLOC_SPECIFIC,
                    ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    /* vmm_get_obj should look inside NO_PHYSICAL regions and return nested
     * vmm_objs from inside. */
    ret = vmm_get_obj(_state->aspace, (vaddr_t)ptr, PAGE_SIZE, &slice);
    ASSERT_EQ(NO_ERROR, ret);
    ASSERT_EQ(PAGE_SIZE, slice.size);
    ASSERT_EQ(NO_ERROR, vmm_free_region(_state->aspace, (vaddr_t)ptr));
    ASSERT_EQ(true, obj_has_only_ref(&slice.obj->obj, &slice.obj_ref));
test_abort:;
}

TEST_F(mmutest_res_group, reserve_group_no_physical) {
    /* NO_PHYSICAL allocations don't count towards memory usage. */
    void* ptr;
    status_t ret =
            vmm_alloc(_state->aspace, "test_reserved_alloc", PAGE_SIZE * 10,
                      &ptr, 0, VMM_FLAG_QUOTA | VMM_FLAG_NO_PHYSICAL,
                      ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                    VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                    VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);
test_abort:;
}

TEST_F(mmutest_res_group, reserve_group_disable_quota) {
    /* Allocations without VMM_FLAG_QUOTA set don't count towards memory usage.
     */
    void* ptr;
    status_t ret =
            vmm_alloc(_state->aspace, "test_reserved_alloc", PAGE_SIZE * 10,
                      &ptr, 0, 0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                    VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(NO_ERROR, ret);
    ret = vmm_alloc(_state->aspace, "test_alloc", PAGE_SIZE, &ptr, 0,
                    VMM_FLAG_QUOTA, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    ASSERT_EQ(ERR_NO_MEMORY, ret);
test_abort:;
}

PORT_TEST(mmutest, "com.android.kernel.mmutest");