1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "config.h"
32 #include "wtf/PartitionAlloc.h"
33
34 #include "wtf/BitwiseOperations.h"
35 #include "wtf/OwnPtr.h"
36 #include "wtf/PassOwnPtr.h"
37 #include <gtest/gtest.h>
38 #include <stdlib.h>
39 #include <string.h>
40
41 #if OS(POSIX)
42 #include <sys/mman.h>
43
44 #ifndef MAP_ANONYMOUS
45 #define MAP_ANONYMOUS MAP_ANON
46 #endif
47 #endif // OS(POSIX)
48
49 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
50
51 namespace {
52
// Upper bound for the size-specific allocator under test; allocations above
// this must go through the generic allocator.
static const size_t kTestMaxAllocation = 4096;
static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
static PartitionAllocatorGeneric genericAllocator;

// Request size used by most tests; small enough for many slots per page.
static const size_t kTestAllocSize = 16;
#ifdef NDEBUG
// Release builds add no debug cookies, so returned pointers are not offset.
static const size_t kPointerOffset = 0;
static const size_t kExtraAllocSize = 0;
#else
// Debug builds wrap each allocation with a leading and trailing cookie, so
// the user pointer is offset by one cookie and each slot grows by two.
static const size_t kPointerOffset = WTF::kCookieSize;
static const size_t kExtraAllocSize = WTF::kCookieSize * 2;
#endif
// Real per-slot footprint of a kTestAllocSize allocation (cookies included).
static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
// Bucket that kTestAllocSize allocations land in.
static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
67
// Initializes both partitions. Called at the start of every test.
static void TestSetup()
{
    allocator.init();
    genericAllocator.init();
}
73
// Tears down both partitions and verifies that no allocations leaked.
static void TestShutdown()
{
#ifndef NDEBUG
    // Test that the partition statistic dumping code works. Previously, it
    // bitrotted because no test calls it.
    partitionDumpStats(*allocator.root());
#endif

    // We expect no leaks in the general case. We have a test for leak
    // detection.
    EXPECT_TRUE(allocator.shutdown());
    EXPECT_TRUE(genericAllocator.shutdown());
}
87
GetFullPage(size_t size)88 static WTF::PartitionPage* GetFullPage(size_t size)
89 {
90 size_t realSize = size + kExtraAllocSize;
91 size_t bucketIdx = realSize >> WTF::kBucketShift;
92 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
93 size_t numSlots = (bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / realSize;
94 void* first = 0;
95 void* last = 0;
96 size_t i;
97 for (i = 0; i < numSlots; ++i) {
98 void* ptr = partitionAlloc(allocator.root(), size);
99 EXPECT_TRUE(ptr);
100 if (!i)
101 first = WTF::partitionCookieFreePointerAdjust(ptr);
102 else if (i == numSlots - 1)
103 last = WTF::partitionCookieFreePointerAdjust(ptr);
104 }
105 EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last));
106 if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage)
107 EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
108 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
109 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
110 EXPECT_TRUE(bucket->activePagesHead);
111 EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage);
112 return bucket->activePagesHead;
113 }
114
FreeFullPage(WTF::PartitionPage * page)115 static void FreeFullPage(WTF::PartitionPage* page)
116 {
117 size_t size = page->bucket->slotSize;
118 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size;
119 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
120 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
121 size_t i;
122 for (i = 0; i < numSlots; ++i) {
123 partitionFree(ptr + kPointerOffset);
124 ptr += size;
125 }
126 }
127
CycleFreeCache(size_t size)128 static void CycleFreeCache(size_t size)
129 {
130 size_t realSize = size + kExtraAllocSize;
131 size_t bucketIdx = realSize >> WTF::kBucketShift;
132 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
133 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
134
135 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
136 void* ptr = partitionAlloc(allocator.root(), size);
137 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
138 partitionFree(ptr);
139 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
140 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
141 }
142 }
143
CycleGenericFreeCache(size_t size)144 static void CycleGenericFreeCache(size_t size)
145 {
146 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
147 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
148 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
149 WTF::PartitionBucket* bucket = page->bucket;
150 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
151 partitionFreeGeneric(genericAllocator.root(), ptr);
152 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
153 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
154 }
155 }
156
157 // Check that the most basic of allocate / free pairs work.
TEST(PartitionAllocTest, Basic)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
    WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage;

    // A freshly initialized bucket points at the shared seed page and has no
    // free pages yet.
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(seedPage, bucket->activePagesHead);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage);

    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    // The pointer sits at the start of a partition page (plus the debug
    // cookie offset, if any).
    EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
    // Check that the offset appears to include a guard page.
    EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);

    partitionFree(ptr);
    // Expect that the last active page does not get tossed to the freelist.
    EXPECT_FALSE(bucket->freePagesHead);

    TestShutdown();
}
180
181 // Check that we can detect a memory leak.
TEST(PartitionAllocTest, SimpleLeak)
{
    TestSetup();
    // Allocate from each partition and deliberately never free, so that
    // shutdown() must report the partitions as still in use.
    void* danglingPtr = partitionAlloc(allocator.root(), kTestAllocSize);
    (void) danglingPtr;
    void* danglingGenericPtr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    (void) danglingGenericPtr;
    EXPECT_FALSE(allocator.shutdown());
    EXPECT_FALSE(genericAllocator.shutdown());
}
192
193 // Test multiple allocations, and freelist handling.
TEST(PartitionAllocTest, MultiAlloc)
{
    TestSetup();

    // Two back-to-back allocations should come from adjacent slots.
    char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    EXPECT_TRUE(ptr2);
    ptrdiff_t diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // Check that we re-use the just-freed slot.
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr2);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
    partitionFree(ptr1);
    ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // With both original slots occupied again, the next allocation should
    // come from the following (third) slot.
    char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr3);
    diff = ptr3 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);

    partitionFree(ptr1);
    partitionFree(ptr2);
    partitionFree(ptr3);

    TestShutdown();
}
228
229 // Test a bucket with multiple pages.
TEST(PartitionAllocTest, MultiPages)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Fill and then empty one page; the bucket should keep it as the active
    // page rather than moving it to the free-page list.
    WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
    FreeFullPage(page);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);
    EXPECT_EQ(0, page->nextPage);
    EXPECT_EQ(0, page->numAllocatedSlots);

    page = GetFullPage(kTestAllocSize);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);

    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);
    // Both pages should live inside the same super page.
    EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);

    // Fully free the non-current page. It should not be freelisted because
    // there is no other immediately usable page. The other page is full.
    FreeFullPage(page);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    // Allocate a new page, it should pull from the freelist.
    page = GetFullPage(kTestAllocSize);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    FreeFullPage(page);
    FreeFullPage(page2);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_EQ(0, page2->numUnprovisionedSlots);
    // page2 was not the active page, so emptying it lands it in the free
    // cache.
    EXPECT_NE(-1, page2->freeCacheIndex);

    TestShutdown();
}
270
271 // Test some finer aspects of internal page transitions.
TEST(PartitionAllocTest, PageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Fill two pages; the most recently filled one is the active head.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(0, page1->nextPage);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);

    // Bounce page1 back into the non-full list then fill it up again.
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, bucket->activePagesHead->nextPage);

    // Allocating another page at this point should cause us to scan over page1
    // (which is both full and NOT our current page), and evict it from the
    // freelist. Older code had a O(n^2) condition due to failure to do this.
    WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page3, bucket->activePagesHead);
    EXPECT_EQ(0, page3->nextPage);

    // Work out a pointer into page2 and free it.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr);
    // Trying to allocate at this time should cause us to cycle around to page2
    // and find the recently freed slot.
    char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(page3, page2->nextPage);

    // Work out a pointer into page1 and free it. This should pull the page
    // back into the list of available pages.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    // This allocation should be satisfied by page1.
    newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, page1->nextPage);

    FreeFullPage(page3);
    FreeFullPage(page2);
    FreeFullPage(page1);

    // Allocating whilst in this state exposed a bug, so keep the test.
    ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    partitionFree(ptr);

    TestShutdown();
}
329
330 // Test some corner cases relating to page transitions in the internal
331 // free page list metadata bucket.
TEST(PartitionAllocTest, FreePageListPageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Number of pages whose metadata entries exactly fill one partition page
    // of free-page-list bookkeeping.
    size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
    // The +1 is because we need to account for the fact that the current page
    // never gets thrown on the freelist.
    ++numToFillFreeListPage;
    OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);

    size_t i;
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    // The head stays active and empty; the next page should have been moved
    // into the free cache, fully deprovisioned.
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);

    // Allocate / free in a different bucket size so we get control of a
    // different free page list. We need two pages because one will be the last
    // active page and not get freed.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
    FreeFullPage(page1);
    FreeFullPage(page2);

    // If we re-allocate all kTestAllocSize allocations, we'll pull all the
    // free pages and end up freeing the first page for free page objects.
    // It's getting a bit tricky but a nice re-entrancy is going on:
    // alloc(kTestAllocSize) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry).
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);

    // As part of the final free-up, we'll test another re-entrancy:
    // free(kTestAllocSize) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry)
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);

    TestShutdown();
}
387
388 // Test a large series of allocations that cross more than one underlying
389 // 64KB super page allocation.
TEST(PartitionAllocTest, MultiPageAllocs)
{
    TestSetup();
    // This is guaranteed to cross a super page boundary because the first
    // partition page "slot" will be taken up by a guard page.
    size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
    // The super page should begin and end in a guard, so we need one less
    // page in order to allocate a single page in the new super page.
    --numPagesNeeded;

    EXPECT_GT(numPagesNeeded, 1u);
    OwnPtr<WTF::PartitionPage*[]> pages;
    pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
    uintptr_t firstSuperPageBase = 0;
    size_t i;
    for (i = 0; i < numPagesNeeded; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
        void* storagePtr = partitionPageToPointer(pages[i]);
        if (!i)
            firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
        if (i == numPagesNeeded - 1) {
            // The last page should have spilled into a second super page.
            uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
            uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
            EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
            // Check that we allocated a guard page for the second page.
            EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
        }
    }
    for (i = 0; i < numPagesNeeded; ++i)
        FreeFullPage(pages[i]);

    TestShutdown();
}
423
424 // Test the generic allocation functions that can handle arbitrary sizes and
425 // reallocing etc.
TEST(PartitionAllocTest, GenericAlloc)
{
    TestSetup();

    // Smallest and just-past-largest bucketed sizes should both succeed.
    void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    void* origPtr = ptr;
    char* charPtr = static_cast<char*>(ptr);
    *charPtr = 'A';

    // Change the size of the realloc, remaining inside the same bucket.
    void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket);
    EXPECT_EQ(ptr, newPtr);

    // Change the size of the realloc, switching buckets.
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket + 1);
    EXPECT_NE(newPtr, ptr);
    // Check that the realloc copied correctly.
    char* newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'A');
#ifndef NDEBUG
    // Subtle: this checks for an old bug where we copied too much from the
    // source of the realloc. The condition can be detected by a trashing of
    // the uninitialized value in the space of the upsized allocation.
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket)));
#endif
    *newCharPtr = 'B';
    // The realloc moved. To check that the old allocation was freed, we can
    // do an alloc of the old allocation size and check that the old allocation
    // address is at the head of the freelist and reused.
    void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_EQ(reusedPtr, origPtr);
    partitionFreeGeneric(genericAllocator.root(), reusedPtr);

    // Downsize the realloc.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'B');
    *newCharPtr = 'C';

    // Upsize the realloc to outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 1);
    EXPECT_NE(newPtr, ptr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'C');
    *newCharPtr = 'D';

    // Upsize and downsize the realloc, remaining outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 10);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'D');
    *newCharPtr = 'E';
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 2);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'E');
    *newCharPtr = 'F';

    // Downsize the realloc to inside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_NE(newPtr, ptr);
    // The shrunk allocation should land back on the original freed slot.
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'F');

    partitionFreeGeneric(genericAllocator.root(), newPtr);
    TestShutdown();
}
510
511 // Test the generic allocation functions can handle some specific sizes of
512 // interest.
TEST(PartitionAllocTest, GenericAllocSizes)
{
    TestSetup();

    // A zero-sized allocation is valid and must be freeable.
    void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // kPartitionPageSize is interesting because it results in just one
    // allocation per page, which tripped up some corner cases.
    size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    // Should be freeable at this point.
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_NE(-1, page->freeCacheIndex);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    // A size that packs two slots into a multi-page slot span, minus a system
    // page so provisioning is uneven.
    size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WTF::kSystemPageSize) / 2) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    memset(ptr, 'A', size);
    ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr3);
    void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr4);

    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr3));
    EXPECT_NE(page, page2);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    // Should be freeable at this point.
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);
    // Re-allocations should pull the most recently freed slots first (LIFO).
    void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr3, newPtr);
    newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr2, newPtr);
#if OS(LINUX) && defined(NDEBUG)
    // On Linux, we have a guarantee that freelisting a page should cause its
    // contents to be nulled out. We check for null here to detect a bug we
    // had where a large slot size was causing us to not properly free all
    // resources back to the system.
    // We only run the check in optimized builds because the debug build
    // writes over the allocated area with an "uninitialized" byte pattern.
    EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
#endif
    partitionFreeGeneric(genericAllocator.root(), newPtr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr4);

    // Can we allocate a massive (512MB) size?
    ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Check a more reasonable, but still direct mapped, size.
    // Chop a system page and a byte off to test for rounding errors.
    size = 20 * 1024 * 1024;
    size -= WTF::kSystemPageSize;
    size -= 1;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (size - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Can we free null?
    partitionFreeGeneric(genericAllocator.root(), 0);

    // Do we correctly get a null for a failed allocation?
    EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));

    TestShutdown();
}
595
596 // Test that we can fetch the real allocated size after an allocation.
TEST(PartitionAllocTest, GenericAllocGetSize)
{
    TestSetup();

    void* ptr;
    size_t requestedSize, actualSize, predictedSize;

    EXPECT_TRUE(partitionAllocSupportsGetSize());

    // Allocate something small.
    requestedSize = 511 - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    // 511 is not a bucket boundary, so the slot must be strictly larger.
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that should be a perfect match for a bucket, because it
    // is an exact power of 2.
    requestedSize = (256 * 1024) - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that is a system page smaller than a bucket. GetSize()
    // should return a larger size than we asked for now.
    requestedSize = (256 * 1024) - WTF::kSystemPageSize - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize + WTF::kSystemPageSize, actualSize);
    // Check that we can write at the end of the reported size too.
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (actualSize - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate something very large, and uneven.
    requestedSize = 512 * 1024 * 1024 - 1;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Too large allocation: the predicted size is returned unchanged.
    requestedSize = INT_MAX;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    EXPECT_EQ(requestedSize, predictedSize);

    TestShutdown();
}
658
659 // Test the realloc() contract.
TEST(PartitionAllocTest, Realloc)
{
    TestSetup();

    // realloc(0, size) should be equivalent to malloc().
    void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
    memset(ptr, 'A', kTestAllocSize);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    // realloc(ptr, 0) should be equivalent to free().
    void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
    EXPECT_EQ(0, ptr2);
    // The freed slot should be at the head of the page's freelist.
    EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead);

    // Test that growing an allocation with realloc() copies everything from the
    // old allocation.
    size_t size = WTF::kSystemPageSize - kExtraAllocSize;
    EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    memset(ptr, 'A', size);
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
    EXPECT_NE(ptr, ptr2);
    char* charPtr2 = static_cast<char*>(ptr2);
    EXPECT_EQ('A', charPtr2[0]);
    EXPECT_EQ('A', charPtr2[size - 1]);
#ifndef NDEBUG
    // The byte past the copied region must still be uninitialized (checks we
    // did not over-copy from the source).
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
#endif

    // Test that shrinking an allocation with realloc() also copies everything
    // from the old allocation.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
    EXPECT_NE(ptr2, ptr);
    char* charPtr = static_cast<char*>(ptr);
    EXPECT_EQ('A', charPtr[0]);
    EXPECT_EQ('A', charPtr[size - 2]);
#ifndef NDEBUG
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
#endif

    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Test that shrinking a direct mapped allocation happens in-place.
    size = WTF::kGenericMaxBucketed + 16 * WTF::kSystemPageSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    size_t actualSize = partitionAllocGetSize(ptr);
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 8 * WTF::kSystemPageSize);
    EXPECT_EQ(ptr, ptr2);
    EXPECT_EQ(actualSize - 8 * WTF::kSystemPageSize, partitionAllocGetSize(ptr2));

    // Test that a previously in-place shrunk direct mapped allocation can be
    // expanded up again within its original size.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - WTF::kSystemPageSize);
    EXPECT_EQ(ptr2, ptr);
    EXPECT_EQ(actualSize - WTF::kSystemPageSize, partitionAllocGetSize(ptr));

    // Test that a direct mapped allocation is performed not in-place when the
    // new size is small enough.
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kSystemPageSize);
    EXPECT_NE(ptr, ptr2);

    partitionFreeGeneric(genericAllocator.root(), ptr2);

    TestShutdown();
}
724
// Tests the handing out of freelists for partial pages.
TEST(PartitionAllocTest, PartialPageFreelists)
{
    TestSetup();

    // A size chosen so that one slot (plus metadata) almost exactly fills a
    // system page.
    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);

    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (bigSize + kExtraAllocSize);
    EXPECT_EQ(4u, totalSlots);
    // The freelist should have one entry, because we were able to exactly fit
    // one object slot and one freelist pointer (the null that the head points
    // to) into a system page.
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    // The second allocation consumes the single freelist entry; no new slots
    // get provisioned.
    void* ptr2 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr2);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(2, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    // The third allocation provisions the remaining two slots; one of them
    // ends up on the freelist.
    void* ptr3 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr3);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(3, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    // The fourth allocation exhausts the page completely.
    void* ptr4 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr4);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(4, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    // A fifth allocation must therefore come from a fresh page.
    void* ptr5 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr5);

    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
    EXPECT_EQ(1, page2->numAllocatedSlots);

    // Churn things a little whilst there's a partial page freelist.
    partitionFree(ptr);
    ptr = partitionAlloc(allocator.root(), bigSize);
    void* ptr6 = partitionAlloc(allocator.root(), bigSize);

    partitionFree(ptr);
    partitionFree(ptr2);
    partitionFree(ptr3);
    partitionFree(ptr4);
    partitionFree(ptr5);
    partitionFree(ptr6);
    // Both pages are now empty; a valid freeCacheIndex indicates they are
    // parked in the free page cache rather than released outright.
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_NE(-1, page2->freeCacheIndex);
    EXPECT_TRUE(page2->freelistHead);
    EXPECT_EQ(0, page2->numAllocatedSlots);

    // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
    size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize;
    bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), mediumSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (mediumSize + kExtraAllocSize);
    size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
    EXPECT_EQ(2u, firstPageSlots);
    // Only the first system page's worth of slots should be provisioned.
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);

    size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize;
    bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), smallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (smallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    size_t verySmallSize = 32 - kExtraAllocSize;
    bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), verySmallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (verySmallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    // And try an allocation size (against the generic allocator) that is
    // larger than a system page.
    size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2)) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // And then make sure than exactly the page size only faults one page.
    size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_FALSE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}
865
866 // Test some of the fragmentation-resistant properties of the allocator.
TEST(PartitionAllocTest,PageRefilling)867 TEST(PartitionAllocTest, PageRefilling)
868 {
869 TestSetup();
870 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
871
872 // Grab two full pages and a non-full page.
873 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
874 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
875 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
876 EXPECT_TRUE(ptr);
877 EXPECT_NE(page1, bucket->activePagesHead);
878 EXPECT_NE(page2, bucket->activePagesHead);
879 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
880 EXPECT_EQ(1, page->numAllocatedSlots);
881
882 // Work out a pointer into page2 and free it; and then page1 and free it.
883 char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
884 partitionFree(ptr2);
885 ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
886 partitionFree(ptr2);
887
888 // If we perform two allocations from the same bucket now, we expect to
889 // refill both the nearly full pages.
890 (void) partitionAlloc(allocator.root(), kTestAllocSize);
891 (void) partitionAlloc(allocator.root(), kTestAllocSize);
892 EXPECT_EQ(1, page->numAllocatedSlots);
893
894 FreeFullPage(page2);
895 FreeFullPage(page1);
896 partitionFree(ptr);
897
898 TestShutdown();
899 }
900
901 // Basic tests to ensure that allocations work for partial page buckets.
TEST(PartitionAllocTest,PartialPages)902 TEST(PartitionAllocTest, PartialPages)
903 {
904 TestSetup();
905
906 // Find a size that is backed by a partial partition page.
907 size_t size = sizeof(void*);
908 WTF::PartitionBucket* bucket = 0;
909 while (size < kTestMaxAllocation) {
910 bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
911 if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartitionPage)
912 break;
913 size += sizeof(void*);
914 }
915 EXPECT_LT(size, kTestMaxAllocation);
916
917 WTF::PartitionPage* page1 = GetFullPage(size);
918 WTF::PartitionPage* page2 = GetFullPage(size);
919 FreeFullPage(page2);
920 FreeFullPage(page1);
921
922 TestShutdown();
923 }
924
925 // Test correct handling if our mapping collides with another.
TEST(PartitionAllocTest,MappingCollision)926 TEST(PartitionAllocTest, MappingCollision)
927 {
928 TestSetup();
929 // The -2 is because the first and last partition pages in a super page are
930 // guard pages.
931 size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
932 OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
933 OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
934
935 size_t i;
936 for (i = 0; i < numPartitionPagesNeeded; ++i)
937 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
938
939 char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
940 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
941 pageBase -= WTF::kPartitionPageSize;
942 // Map a single system page either side of the mapping for our allocations,
943 // with the goal of tripping up alignment of the next mapping.
944 void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
945 EXPECT_TRUE(map1);
946 void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
947 EXPECT_TRUE(map2);
948 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
949 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
950
951 for (i = 0; i < numPartitionPagesNeeded; ++i)
952 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
953
954 WTF::freePages(map1, WTF::kPageAllocationGranularity);
955 WTF::freePages(map2, WTF::kPageAllocationGranularity);
956
957 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
958 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
959 pageBase -= WTF::kPartitionPageSize;
960 // Map a single system page either side of the mapping for our allocations,
961 // with the goal of tripping up alignment of the next mapping.
962 map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
963 EXPECT_TRUE(map1);
964 map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
965 EXPECT_TRUE(map2);
966 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
967 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
968
969 WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
970 WTF::freePages(map1, WTF::kPageAllocationGranularity);
971 WTF::freePages(map2, WTF::kPageAllocationGranularity);
972
973 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);
974
975 // And make sure we really did get a page in a new superpage.
976 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
977 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
978
979 FreeFullPage(pageInThirdSuperPage);
980 for (i = 0; i < numPartitionPagesNeeded; ++i) {
981 FreeFullPage(firstSuperPagePages[i]);
982 FreeFullPage(secondSuperPagePages[i]);
983 }
984
985 TestShutdown();
986 }
987
// Tests that pages in the free page cache do get freed as appropriate.
TEST(PartitionAllocTest, FreeCache)
{
    TestSetup();

    // Nothing allocated yet, so nothing should be committed.
    EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);

    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(0, bucket->freePagesHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    partitionFree(ptr);
    // The page is now empty but parked in the free page cache (valid
    // freeCacheIndex), not yet released: its freelist is still intact.
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_TRUE(page->freelistHead);

    CycleFreeCache(kTestAllocSize);

    // Flushing the cache should have really freed the unused page.
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(-1, page->freeCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    WTF::PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
    // Only the slot span used by CycleFreeCache() remains committed.
    EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);

    // Check that an allocation works ok whilst in this state (a free'd page
    // as the active pages head).
    ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_FALSE(bucket->freePagesHead);
    partitionFree(ptr);

    // Also check that a page that is bouncing immediately between empty and
    // used does not get freed.
    for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) {
        ptr = partitionAlloc(allocator.root(), bigSize);
        EXPECT_TRUE(page->freelistHead);
        partitionFree(ptr);
        EXPECT_TRUE(page->freelistHead);
    }
    EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    TestShutdown();
}
1036
// Tests for a bug we had with losing references to free pages.
TEST(PartitionAllocTest, LostFreePagesBug)
{
    TestSetup();

    // A size that occupies an entire partition page per slot.
    size_t size = WTF::kPartitionPageSize - kExtraAllocSize;

    void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);

    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr2));
    WTF::PartitionBucket* bucket = page->bucket;

    EXPECT_EQ(0, bucket->freePagesHead);
    // NOTE(review): -1 here appears to be the sentinel for a full page (the
    // single slot is taken) — confirm against PartitionAlloc.h.
    EXPECT_EQ(-1, page->numAllocatedSlots);
    EXPECT_EQ(1, page2->numAllocatedSlots);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    // Both pages are now empty, each still holding a freelist.
    EXPECT_EQ(0, bucket->freePagesHead);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_TRUE(page2->freelistHead);

    CycleGenericFreeCache(kTestAllocSize);

    // Cycling the free cache clears both pages' freelists...
    EXPECT_FALSE(page->freelistHead);
    EXPECT_FALSE(page2->freelistHead);

    // ...while both pages remain reachable via the active pages list.
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->activePagesHead->nextPage);

    // At this moment, we have two freed pages, on the freelist.

    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->freePagesHead);

    CycleGenericFreeCache(kTestAllocSize);

    // We're now set up to trigger the bug by scanning over the active pages
    // list, where the current active page is freed, and there exists at least
    // one freed page in the free pages list.
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->freePagesHead);

    TestShutdown();
}
1098
1099 #if !OS(ANDROID)
1100
1101 // Make sure that malloc(-1) dies.
1102 // In the past, we had an integer overflow that would alias malloc(-1) to
1103 // malloc(0), which is not good.
TEST(PartitionAllocDeathTest,LargeAllocs)1104 TEST(PartitionAllocDeathTest, LargeAllocs)
1105 {
1106 TestSetup();
1107 // Largest alloc.
1108 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
1109 // And the smallest allocation we expect to die.
1110 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");
1111
1112 TestShutdown();
1113 }
1114
1115 // Check that our immediate double-free detection works.
TEST(PartitionAllocDeathTest,ImmediateDoubleFree)1116 TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
1117 {
1118 TestSetup();
1119
1120 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1121 EXPECT_TRUE(ptr);
1122 partitionFreeGeneric(genericAllocator.root(), ptr);
1123
1124 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1125
1126 TestShutdown();
1127 }
1128
1129 // Check that our refcount-based double-free detection works.
TEST(PartitionAllocDeathTest,RefcountDoubleFree)1130 TEST(PartitionAllocDeathTest, RefcountDoubleFree)
1131 {
1132 TestSetup();
1133
1134 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1135 EXPECT_TRUE(ptr);
1136 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1137 EXPECT_TRUE(ptr2);
1138 partitionFreeGeneric(genericAllocator.root(), ptr);
1139 partitionFreeGeneric(genericAllocator.root(), ptr2);
1140 // This is not an immediate double-free so our immediate detection won't
1141 // fire. However, it does take the "refcount" of the partition page to -1,
1142 // which is illegal and should be trapped.
1143 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1144
1145 TestShutdown();
1146 }
1147
1148 // Check that guard pages are present where expected.
TEST(PartitionAllocDeathTest,GuardPages)1149 TEST(PartitionAllocDeathTest, GuardPages)
1150 {
1151 TestSetup();
1152
1153 // This large size will result in a direct mapped allocation with guard
1154 // pages at either end.
1155 size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllocSize;
1156 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
1157 EXPECT_TRUE(ptr);
1158 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1159
1160 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1161 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1162
1163 partitionFreeGeneric(genericAllocator.root(), ptr);
1164
1165 TestShutdown();
1166 }
1167
1168 #endif // !OS(ANDROID)
1169
1170 // Tests that the countLeadingZeros() functions work to our satisfaction.
1171 // It doesn't seem worth the overhead of a whole new file for these tests, so
1172 // we'll put them here since partitionAllocGeneric will depend heavily on these
1173 // functions working correctly.
TEST(PartitionAllocTest,CLZWorks)1174 TEST(PartitionAllocTest, CLZWorks)
1175 {
1176 EXPECT_EQ(32u, WTF::countLeadingZeros32(0));
1177 EXPECT_EQ(31u, WTF::countLeadingZeros32(1));
1178 EXPECT_EQ(1u, WTF::countLeadingZeros32(1 << 30));
1179 EXPECT_EQ(0u, WTF::countLeadingZeros32(1 << 31));
1180
1181 #if CPU(64BIT)
1182 EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
1183 EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
1184 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
1185 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
1186 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
1187 #else
1188 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0));
1189 EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1));
1190 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1 << 30));
1191 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1 << 31));
1192 #endif
1193 }
1194
1195 } // namespace
1196
1197 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1198