// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/page_allocator.h"

#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

#include "build/build_config.h"
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/tagging.h"

#if defined(LINUX_NAME_REGION)
#include "base/debug/proc_maps_linux.h"
#endif

#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_POSIX)
#include <setjmp.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/time.h>
#endif  // BUILDFLAG(IS_POSIX)

#include "partition_alloc/arm_bti_test_functions.h"

#if PA_CONFIG(HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#define MTE_KILLED_BY_SIGNAL_AVAILABLE
#endif
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace partition_alloc::internal {

namespace {

// Any number of bytes that can be allocated with no trouble.
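// (1 MiB rounded down to a multiple of PageAllocationGranularity(); the mask
// arithmetic below assumes the granularity is a power of two.)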
size_t EasyAllocSize() {
  return (1024 * 1024) & ~(PageAllocationGranularity() - 1);
}

// A huge amount of memory, greater than or equal to the ASLR space.
size_t HugeMemoryAmount() {
  return std::max(::partition_alloc::internal::ASLRMask(),
                  std::size_t{2} * 1024 * 1024 * 1024);
}

}  // namespace

TEST(PartitionAllocPageAllocatorTest, Rounding) {
  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize()));
  EXPECT_EQ(2 * SystemPageSize(), RoundUpToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
  EXPECT_EQ(0u, RoundDownToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize()));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(2 * SystemPageSize() - 1));
  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(1));
  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(
                                             PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundUpToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(
      2 * PageAllocationGranularity(),
      RoundUpToPageAllocationGranularity(PageAllocationGranularity() + 1));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(
                    PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(PageAllocationGranularity(), RoundDownToPageAllocationGranularity(
                                             PageAllocationGranularity() + 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(
                2 * PageAllocationGranularity() - 1));
}

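// Reading off the expectations below, NextAlignedWithOffset(pos, alignment,
// offset) returns the smallest address >= |pos| that is congruent to |offset|
// modulo |alignment|; e.g. starting from 1024 with alignment 1024 and offset
// 1000, the next qualifying address is 1000 + 1024 = 2024.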
TEST(PartitionAllocPageAllocatorTest, NextAlignedWithOffset) {
  EXPECT_EQ(1024u, NextAlignedWithOffset(1024, 1, 0));
  EXPECT_EQ(2024u, NextAlignedWithOffset(1024, 1024, 1000));
  EXPECT_EQ(2024u, NextAlignedWithOffset(2024, 1024, 1000));
  EXPECT_EQ(3048u, NextAlignedWithOffset(2025, 1024, 1000));
  EXPECT_EQ(2048u, NextAlignedWithOffset(1024, 2048, 0));
  EXPECT_EQ(2148u, NextAlignedWithOffset(1024, 2048, 100));
  EXPECT_EQ(2000u, NextAlignedWithOffset(1024, 2048, 2000));
}

// Test that failed page allocations invoke ReleaseReservation().
// We detect this by making a reservation and ensuring that after failure, we
// can make a new reservation.
TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
  // Release any reservation made by another test.
  ReleaseReservation();

  // We can make a reservation.
  EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));

  // We can't make another reservation until we trigger an allocation failure.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0) {
    return;
  }

  uintptr_t result =
      AllocPages(size, PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  if (!result) {
    // We triggered allocation failure. Our reservation should have been
    // released, and we should be able to make a new reservation.
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    ReleaseReservation();
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}

// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
#else
#define MAYBE_ReserveAddressSpace ReserveAddressSpace
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)

// Test that reserving address space can fail.
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
  // Release any reservation made by another test.
  ReleaseReservation();

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0) {
    return;
  }

  bool success = ReserveAddressSpace(size);
  if (!success) {
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}

TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWrite),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
  size_t alignment = 8 * PageAllocationGranularity();
  size_t sizes[] = {PageAllocationGranularity(),
                    alignment - PageAllocationGranularity(), alignment,
                    alignment + PageAllocationGranularity(), alignment * 4};
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};
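  // In the calls below, the leading 0 passed to AllocPagesWithAlignOffset() is
  // the requested address hint (none here); the allocator must then return a
  // block congruent to |offset| modulo |alignment|, which the loop checks.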
  for (size_t size : sizes) {
    for (size_t offset : offsets) {
      uintptr_t buffer = AllocPagesWithAlignOffset(
          0, size, alignment, offset,
          PageAccessibilityConfiguration(
              PageAccessibilityConfiguration::kReadWrite),
          PageTag::kChromium);
      EXPECT_TRUE(buffer);
      EXPECT_EQ(buffer % alignment, offset);
      FreePages(buffer, size);
    }
  }
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTagged) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged is safe to use on all
  // systems (even those which don't support MTE).
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadExecuteConfirmCFI) {
  // This test checks that indirect branches to anything other than a valid
  // branch target in a PageAccessibilityConfiguration::kReadExecute-mapped
  // page crash on systems which support the Armv8.5 Branch Target
  // Identification extension.
  base::CPU cpu;
  if (!cpu.has_bti()) {
#if BUILDFLAG(IS_IOS)
    // Workaround for incorrectly failed iOS tests with GTEST_SKIP,
    // see crbug.com/912138 for details.
    return;
#else
    GTEST_SKIP();
#endif
  }
#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  // Next, map some read-write memory and copy the BTI-enabled function there.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWrite),
                 PageTag::kChromium);
  ptrdiff_t function_range =
      reinterpret_cast<char*>(arm_bti_test_function_end) -
      reinterpret_cast<char*>(arm_bti_test_function);
  ptrdiff_t invalid_offset =
      reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
      reinterpret_cast<char*>(arm_bti_test_function);
  memcpy(reinterpret_cast<void*>(buffer),
         reinterpret_cast<void*>(arm_bti_test_function), function_range);

  // Next re-protect the page.
  SetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecuteProtected));
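  // kReadExecuteProtected is, as we understand it, the BTI-enforcing variant
  // of kReadExecute: indirect calls into the page must now target a valid BTI
  // landing pad, which is exactly what this test exercises.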

  using BTITestFunction = int64_t (*)(int64_t);

  // Attempt to call the function through the BTI-enabled entrypoint. Confirm
  // that it works.
  BTITestFunction bti_enabled_fn = reinterpret_cast<BTITestFunction>(buffer);
  BTITestFunction bti_invalid_fn =
      reinterpret_cast<BTITestFunction>(buffer + invalid_offset);
  EXPECT_EQ(bti_enabled_fn(15), 18);
  // Next, attempt to call the function without the entrypoint.
  EXPECT_EXIT({ bti_invalid_fn(15); }, testing::KilledBySignal(SIGILL),
              "");  // Should crash with SIGILL.
  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedSynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports the
  // Armv8.5 Memory Tagging Extension.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
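  // MTE operates on 16-byte granules: __arm_mte_increment_tag() only adjusts
  // the logical tag carried in the pointer's top byte, while
  // __arm_mte_set_tag() commits that tag to the granule in memory.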
  // Assign a 0x1 tag to the first granule of buffer.
  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
  EXPECT_NE(buffer0, buffer1);
  __arm_mte_set_tag(buffer1);
  // Retrieve the tag to ensure that it's set.
  buffer1 = __arm_mte_get_tag(buffer0);
  // Prove that the tag is different (if they're the same, the test won't
  // work).
  ASSERT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
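  // EXPECT_EXIT runs its statement in a child process, so switching the
  // tagging mode inside the block below does not leak into the parent; the
  // EXPECT_EQ against |parent_tagging_mode| afterwards relies on that.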
  EXPECT_EXIT(
      {
        // Switch to synchronous mode.
#if BUILDFLAG(IS_ANDROID)
        ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kSynchronous);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kSynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kSynchronous);
        // Write to the buffer using its previous tag. A segmentation fault
        // should be delivered.
        *buffer0 = 42;
      },
      testing::KilledBySignal(SIGSEGV), "");
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedAsynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports MTE.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
  int* buffer1 = __arm_mte_get_tag(buffer0);
  EXPECT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
  EXPECT_EXIT(
      {
        // Switch to asynchronous mode.
#if BUILDFLAG(IS_ANDROID)
        ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kAsynchronous);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kAsynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kAsynchronous);
        // Write to the buffer using its previous tag. A fault should be
        // generated at this point, but we may not notice straight away...
        *buffer0 = 42;
        EXPECT_EQ(42, *buffer0);
        PA_LOG(ERROR) << "=";  // ...until we receive control back from the
                               // kernel (e.g. on a system call).
      },
      testing::KilledBySignal(SIGSEGV), "");
  FreePages(buffer, PageAllocationGranularity());
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
#else
  PA_NOTREACHED();
#endif
}

// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

namespace {
sigjmp_buf g_continuation;

void SignalHandler(int signal, siginfo_t* info, void*) {
  siglongjmp(g_continuation, 1);
}
}  // namespace

// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
#if BUILDFLAG(IS_APPLE)
#define EXTRA_FAULT_BEGIN_ACTION() \
  struct sigaction old_bus_action; \
  sigaction(SIGBUS, &action, &old_bus_action);
#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
#else
#define EXTRA_FAULT_BEGIN_ACTION()
#define EXTRA_FAULT_END_ACTION()
#endif

// Install a signal handler so we can catch the fault we're about to trigger.
#define FAULT_TEST_BEGIN()                  \
  struct sigaction action = {};             \
  struct sigaction old_action = {};         \
  action.sa_sigaction = SignalHandler;      \
  sigemptyset(&action.sa_mask);             \
  action.sa_flags = SA_SIGINFO;             \
  sigaction(SIGSEGV, &action, &old_action); \
  EXTRA_FAULT_BEGIN_ACTION();               \
  int const save_sigs = 1;                  \
  if (!sigsetjmp(g_continuation, save_sigs)) {
// Fault generating code goes here...

// Handle when sigsetjmp returns nonzero (we are returning from our handler).
#define FAULT_TEST_END()                      \
  }                                           \
  else {                                      \
    sigaction(SIGSEGV, &old_action, nullptr); \
    EXTRA_FAULT_END_ACTION();                 \
  }
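
// A minimal usage sketch, mirroring the tests below: code placed between the
// two macros is expected to fault, so control should jump past it into the
// FAULT_TEST_END() recovery branch.
//
//   FAULT_TEST_BEGIN()
//   ...code that should raise SIGSEGV (or SIGBUS on Mac)...
//   EXPECT_TRUE(false);  // Not reached if the fault fires.
//   FAULT_TEST_END()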

TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);

  FAULT_TEST_BEGIN()

  // Reading from buffer should fault.
  // Volatile prevents the compiler from eliminating the load by folding
  // buffer0_contents == *buffer0.
  volatile int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  FreePages(buffer, PageAllocationGranularity());
}

// TODO(crbug.com/1291888): Understand why we can't read from Read-Execute pages
// on iOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_ReadExecutePages DISABLED_ReadExecutePages
#else
#define MAYBE_ReadExecutePages ReadExecutePages
#endif  // BUILDFLAG(IS_IOS)
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReadExecutePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadExecute),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  // Reading from buffer should succeed.
  int buffer0_contents = *buffer0;

  FAULT_TEST_BEGIN()

  // Writing to buffer should fault.
  *buffer0 = ~buffer0_contents;
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  // Make sure no write occurred.
  EXPECT_EQ(buffer0_contents, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

#endif  // BUILDFLAG(IS_POSIX)

#if defined(LINUX_NAME_REGION)
TEST(PartitionAllocPageAllocatorTest, PageTagging) {
  size_t size = PageAllocationGranularity();
  uintptr_t buffer =
      AllocPages(size, PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  ASSERT_TRUE(buffer);

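  // With LINUX_NAME_REGION, the allocator names its anonymous mappings (on
  // Linux via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...)), so the region
  // should show up in /proc/self/maps as "[anon:chromium]" for
  // PageTag::kChromium.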
  auto is_region_named = [](uintptr_t start_address) {
    std::string proc_maps;
    EXPECT_TRUE(::base::debug::ReadProcMaps(&proc_maps));
    std::vector<::base::debug::MappedMemoryRegion> regions;
    EXPECT_TRUE(::base::debug::ParseProcMaps(proc_maps, &regions));

    bool found = false;
    for (const auto& region : regions) {
      if (region.start == start_address) {
        found = true;
        return "[anon:chromium]" == region.path;
      }
    }
    EXPECT_TRUE(found);
    return false;
  };

  bool before = is_region_named(buffer);
  DecommitAndZeroSystemPages(buffer, size);
  bool after = is_region_named(buffer);

#if BUILDFLAG(IS_ANDROID)
  EXPECT_TRUE(before) << "VMA tagging should always work on Android";
#endif
  // When not running on Android, the prctl() command may be defined in the
  // headers, but not be implemented by the host kernel.
  EXPECT_EQ(before, after);

  FreePages(buffer, size);
}
#endif  // defined(LINUX_NAME_REGION)

TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
  if (!DecommittedMemoryIsAlwaysZeroed()) {
    return;
  }
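  // The early return above skips platforms where decommitted memory is not
  // guaranteed to read back as zero after recommit; there, the sum check at
  // the end of this test would be meaningless.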

  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration(
                                    PageAccessibilityConfiguration::kReadWrite),
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitSystemPages(buffer, size,
                      PageAccessibilityDisposition::kAllowKeepForPerf);
  RecommitSystemPages(buffer, size,
                      PageAccessibilityConfiguration(
                          PageAccessibilityConfiguration::kReadWrite),
                      PageAccessibilityDisposition::kAllowKeepForPerf);

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}

TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration(
                                    PageAccessibilityConfiguration::kReadWrite),
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitAndZeroSystemPages(buffer, size);

// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

  FAULT_TEST_BEGIN()

  // Reading from buffer should now fault.
  int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

#endif

  // Clients of the DecommitAndZero API (in particular, V8) currently just
  // call SetSystemPagesAccess to mark the region as accessible again, so we
  // use that here as well.
  SetSystemPagesAccess(buffer, size,
                       PageAccessibilityConfiguration(
                           PageAccessibilityConfiguration::kReadWrite));

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}

TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
  size_t size = PageAllocationGranularity();
  // Ask for a large alignment to make sure that trimming doesn't change the
  // accounting.
  size_t alignment = 128 * PageAllocationGranularity();
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};

  size_t mapped_size_before = GetTotalMappedSize();

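  // As the expectations below show, GetTotalMappedSize() tracks address space
  // that is mapped rather than committed: decommitting leaves it unchanged,
  // and only FreePages() shrinks it back.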
  for (size_t offset : offsets) {
    uintptr_t data = AllocPagesWithAlignOffset(
        0, size, alignment, offset,
        PageAccessibilityConfiguration(
            PageAccessibilityConfiguration::kInaccessible),
        PageTag::kChromium);
    ASSERT_TRUE(data);

    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    DecommitSystemPages(data, size,
                        PageAccessibilityDisposition::kAllowKeepForPerf);
    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    FreePages(data, size);
    EXPECT_EQ(mapped_size_before, GetTotalMappedSize());
  }
}

TEST(PartitionAllocPageAllocatorTest, AllocInaccessibleWillJitLater) {
  // Verify that kInaccessibleWillJitLater allows read/write and read/execute
  // permissions to be set.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessibleWillJitLater),
                 PageTag::kChromium);
  EXPECT_TRUE(
      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
                              PageAccessibilityConfiguration(
                                  PageAccessibilityConfiguration::kReadWrite)));
  EXPECT_TRUE(TrySetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecute)));
  FreePages(buffer, PageAllocationGranularity());
}

#if BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
// TODO(crbug.com/1452151): Fix test to GTEST_SKIP() if MAP_JIT is in use,
// or to be run otherwise, since kReadWriteExecute is used in some other
// configurations.
#define MAYBE_AllocReadWriteExecute DISABLED_AllocReadWriteExecute
#else
#define MAYBE_AllocReadWriteExecute AllocReadWriteExecute
#endif  // BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
TEST(PartitionAllocPageAllocatorTest, MAYBE_AllocReadWriteExecute) {
  // Verify that kReadWriteExecute is similarly functional.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteExecute),
                 PageTag::kChromium);
  EXPECT_TRUE(
      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
                              PageAccessibilityConfiguration(
                                  PageAccessibilityConfiguration::kReadWrite)));
  EXPECT_TRUE(TrySetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecute)));
  FreePages(buffer, PageAllocationGranularity());
}

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)