/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef WTF_PartitionAlloc_h
#define WTF_PartitionAlloc_h

// DESCRIPTION
// partitionAlloc() and partitionFree() are approximately analogous
// to malloc() and free().
//
// The main difference is that a PartitionRoot object must be supplied to
// these functions, representing a specific "heap partition" that will
// be used to satisfy the allocation. Different partitions are guaranteed to
// exist in separate address spaces, including being separate from the main
// system heap. If the contained objects are all freed, physical memory is
// returned to the system but the address space remains reserved.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
// PartitionAllocator TEMPLATED CLASS. To minimize the instruction count
// to the fullest extent possible, the PartitionRoot is really just a
// header adjacent to other data areas provided by the PartitionAllocator
// class.
//
// Allocations and frees against a single partition must be single threaded.
// Allocations must not exceed a maximum size, typically 4088 bytes at this
// time. Allocation sizes must be aligned to the system pointer size.
// The separate APIs partitionAllocGeneric() and partitionFreeGeneric() are
// provided, and they do not have the above three restrictions. In return,
// you take a small performance hit. (See the usage sketch below.)
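//
// A minimal usage sketch (the partition size of 4096 and the allocation
// size of 16 are illustrative values only, not requirements):
//
//   static PartitionAllocator<4096> allocator;
//   allocator.init();
//   void* ptr = partitionAlloc(allocator.root(), 16);
//   // ... use the 16 bytes at ptr ...
//   partitionFree(ptr);
//   allocator.shutdown();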
//
// This allocator is designed to be extremely fast, thanks to the following
// properties and design:
// - Just a single (reasonably predictable) branch in the hot / fast path for
// both allocating and (significantly) freeing.
// - A minimal number of operations in the hot / fast path, with the slow paths
// in separate functions, leading to the possibility of inlining.
// - Each partition page (which is usually multiple physical pages) has a header
// structure which allows fast mapping of free() address to an underlying
// bucket.
// - Supports a lock-free API for fast performance in single-threaded cases.
// - The freelist for a given bucket is split across a number of partition
// pages, enabling various simple tricks to try and minimize fragmentation.
// - Fine-grained bucket sizes leading to less waste and better packing.
//
// The following security properties are provided at this time:
// - Linear overflows cannot corrupt into the partition.
// - Linear overflows cannot corrupt out of the partition.
// - Freed pages will only be re-used within the partition.
// - Freed pages will only hold same-sized objects when re-used.
// - Dereference of freelist pointer should fault.
// - Out-of-line main metadata: linear over or underflow cannot corrupt it.
// - Partial pointer overwrite of freelist pointer should fault.
// - Rudimentary double-free detection.
//
// The following security properties could be investigated in the future:
// - Per-object bucketing (instead of per-size) is mostly available at the API,
// but not used yet.
// - No randomness of freelist entries or bucket position.
// - Better checking for wild pointers in free().
// - Better freelist masking function to guarantee fault on 32-bit.

#include "wtf/Assertions.h"
#include "wtf/ByteSwap.h"
#include "wtf/CPU.h"
#include "wtf/FastMalloc.h"
#include "wtf/PageAllocator.h"
#include "wtf/QuantizedAllocation.h"
#include "wtf/SpinLock.h"

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif

#ifndef NDEBUG
#include <string.h>
#endif

namespace WTF {

// Maximum size of a partition's mappings. 2046MB. Note that the total amount
// of bytes allocatable at the API will be smaller. This is because things
// like guard pages, metadata, page headers and wasted space come out of the
// total. The ~2GB is not necessarily contiguous in virtual address space.
static const size_t kMaxPartitionSize = 2046u * 1024u * 1024u;
// Allocation granularity of sizeof(void*) bytes.
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. We rarely deal
// with system pages. Most references to "page" refer to partition pages. We
// do also have the concept of "super pages" -- these are the underlying
// system allocations we make. Super pages contain multiple partition pages
// inside them.
static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
// To avoid fragmentation via never-used freelist entries, we hand out
// partition freelist sections gradually, in units of the dominant system page
// size. What we're actually doing is avoiding filling the full partition page
// (typically 16KB) with freelist pointers right away. Writing freelist
// pointers will fault and dirty a private page, which is very wasteful if we
// never actually store objects there.
static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;

// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB-aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
static const size_t kSuperPageShift = 21; // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;

static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
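
// A worked example of the constants above, assuming a 4KB system page (an
// assumption; kSystemPageSize comes from wtf/PageAllocator.h): a 2MB super
// page holds 2MB / 16KB = 128 partition pages, and their metadata occupies
// 128 * 32 bytes = 4KB, i.e. exactly one system page, so the whole metadata
// area fits inside the first (unusable) partition page of the super page.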

#ifndef NDEBUG
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
#if CPU(64BIT)
static const uintptr_t kCookieValue = 0xDEADBEEFDEADBEEFul;
#else
static const uintptr_t kCookieValue = 0xDEADBEEFu;
#endif
#endif

struct PartitionRoot;
struct PartitionBucket;

struct PartitionFreelistEntry {
    PartitionFreelistEntry* next;
};

// Some notes on page states. A page can be in one of three major states:
// 1) Active.
// 2) Full.
// 3) Free.
// An active page has available free slots. A full page has no free slots. A
// free page has had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because both
// free and full pages may briefly be present in the list until we next do a
// scan over it. The "free page" list is an accurate list of pages which have
// been returned back to the system.
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// free list and it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfill the request.
// If it does this, full and free pages encountered will be booted out of the
// active list. If there are no suitable active pages found, a free page (if
// one exists) will be pulled from the free list on to the active list.
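// As an illustrative (non-normative) trace of these transitions: a page
// starts active; when its last slot is handed out it becomes full and is
// booted from the active list during the next scan; a later free() of one
// slot returns it to the head of the active list; and if free() empties it
// completely, the slow path may release its memory and move it to the free
// list.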
struct PartitionPage {
    union { // Accessed most in hot path => goes first.
        PartitionFreelistEntry* freelistHead; // If the page is active.
        PartitionPage* freePageNext; // If the page is free.
    } u;
    PartitionPage* activePageNext;
    PartitionBucket* bucket;
    int numAllocatedSlots; // Deliberately signed, -1 for free page, -n for full pages.
    unsigned numUnprovisionedSlots;
};

struct PartitionBucket {
    PartitionPage* activePagesHead; // Accessed most in hot path => goes first.
    PartitionPage* freePagesHead;
    PartitionRoot* root;
    unsigned numFullPages;
    unsigned pageSize;
};

// An "extent" is a span of consecutive super pages. We link to the partition's
// next extent (if there is one) at the very start of a super page's metadata
// area.
struct PartitionSuperPageExtentEntry {
    char* superPageBase;
    char* superPagesEnd;
    PartitionSuperPageExtentEntry* next;
};

// Never instantiate a PartitionRoot directly, instead use PartitionAllocator.
struct PartitionRoot {
    int lock;
    size_t totalSizeOfSuperPages;
    unsigned numBuckets;
    unsigned maxAllocation;
    bool initialized;
    char* nextSuperPage;
    char* nextPartitionPage;
    char* nextPartitionPageEnd;
    PartitionSuperPageExtentEntry* currentExtent;
    PartitionSuperPageExtentEntry firstExtent;
    PartitionPage seedPage;
    PartitionBucket seedBucket;

    // The PartitionAllocator templated class ensures the following is correct.
    ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<PartitionBucket*>(this + 1); }
    ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast<const PartitionBucket*>(this + 1); }
};

WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation);
WTF_EXPORT NEVER_INLINE bool partitionAllocShutdown(PartitionRoot*);

WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRoot*, void*, size_t);

// The plan is to eventually remove the SuperPageBitmap.
#if CPU(32BIT)
class SuperPageBitmap {
public:
    ALWAYS_INLINE static bool isAvailable()
    {
        return true;
    }

    ALWAYS_INLINE static bool isPointerInSuperPage(void* ptr)
    {
        uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
        raw >>= kSuperPageShift;
        size_t byteIndex = raw >> 3;
        size_t bit = raw & 7;
        ASSERT(byteIndex < sizeof(s_bitmap));
        return s_bitmap[byteIndex] & (1 << bit);
    }

    static void registerSuperPage(void* ptr);
    static void unregisterSuperPage(void* ptr);

private:
    WTF_EXPORT static unsigned char s_bitmap[1 << (32 - kSuperPageShift - 3)];
};

#else // CPU(32BIT)

class SuperPageBitmap {
public:
    ALWAYS_INLINE static bool isAvailable()
    {
        return false;
    }

    ALWAYS_INLINE static bool isPointerInSuperPage(void* ptr)
    {
        ASSERT(false);
        return false;
    }

    static void registerSuperPage(void* ptr) { }
    static void unregisterSuperPage(void* ptr) { }
};

#endif // CPU(32BIT)

ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr)
{
    // We use bswap on little endian as a fast mask for two reasons:
    // 1) If an object is freed and its vtable is then used, and the attacker
    // doesn't get a chance to run allocations between the free and the use,
    // the vtable dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    // corrupt a freelist pointer, partial pointer overwrite attacks are
    // thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if CPU(BIG_ENDIAN)
    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
    uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
    return reinterpret_cast<PartitionFreelistEntry*>(masked);
}
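
// Note that partitionFreelistMask() is its own inverse: the same call both
// encodes and decodes a freelist pointer. As an illustration (addresses are
// made up), on little-endian 64-bit the plausible heap pointer
// 0x0000560000001000 byte-swaps to 0x0010000000560000, which is not a
// canonical x64 address and so faults on dereference; masking again restores
// the original pointer.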

ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size)
{
#ifndef NDEBUG
    // Add space for cookies.
    size += 2 * sizeof(uintptr_t);
#endif
    return size;
}

ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size)
{
#ifndef NDEBUG
    // Remove space for cookies.
    size -= 2 * sizeof(uintptr_t);
#endif
    return size;
}

ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr)
{
#ifndef NDEBUG
    // The value given to the application is actually just after the cookie.
    ptr = static_cast<uintptr_t*>(ptr) - 1;
#endif
    return ptr;
}

ALWAYS_INLINE size_t partitionBucketSize(const PartitionBucket* bucket)
{
    PartitionRoot* root = bucket->root;
    size_t index = bucket - &root->buckets()[0];
    size_t size = index << kBucketShift;
    // Make sure the zero-sized bucket actually has space for freelist pointers.
    if (UNLIKELY(!size))
        size = kAllocationGranularity;
    return size;
}
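
// For example, on a 64-bit system (kBucketShift == 3), the bucket at index 3
// is 3 << 3 = 24 bytes, while the bucket at index 0 reports
// kAllocationGranularity (8 bytes) so that even a "zero-sized" slot can hold
// a freelist pointer.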

ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
    // The metadata area is exactly one system page (the guard page) into the
    // super page.
    return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}

ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
    char* superPagePtr = reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
    uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
    // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    PartitionPage* page = reinterpret_cast<PartitionPage*>(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift));
    return page;
}
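
// A worked mapping (with a made-up address): for a super page based at B
// (2MB-aligned) and an object pointer at B + 0x8000, the partition page index
// is 0x8000 >> 14 = 2, so the page's metadata lives at
// B + kSystemPageSize + (2 << kPageMetadataShift), i.e. 64 bytes into the
// metadata area. partitionPageToPointer() below is the exact inverse.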

ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
{
    PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
    // Checks that the pointer's offset within the partition page is a multiple of the bucket size.
    ASSERT(!((reinterpret_cast<uintptr_t>(ptr) & kPartitionPageOffsetMask) % partitionBucketSize(page->bucket)));
    return page;
}

ALWAYS_INLINE void* partitionPageToPointer(PartitionPage* page)
{
    uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
    uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
    ASSERT(superPageOffset > kSystemPageSize);
    ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
    uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
    // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
    ASSERT(partitionPageIndex);
    ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
    uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
    void* ret = reinterpret_cast<void*>(superPageBase + (partitionPageIndex << kPartitionPageShift));
    return ret;
}

ALWAYS_INLINE bool partitionPointerIsValid(PartitionRoot* root, void* ptr)
{
    // On 32-bit systems, we have an optimization where we have a bitmap that
    // can instantly tell us if a pointer is in a super page or not.
    // It is a global bitmap instead of a per-partition bitmap but this is a
    // reasonable space vs. accuracy trade off.
    if (SuperPageBitmap::isAvailable())
        return SuperPageBitmap::isPointerInSuperPage(ptr);

    // On 64-bit systems, we check the list of super page extents. Due to the
    // massive address space, we typically have a single extent.
    // Dominant case: the pointer is in the first extent, which grew without any collision.
    if (LIKELY(ptr >= root->firstExtent.superPageBase) && LIKELY(ptr < root->firstExtent.superPagesEnd))
        return true;

    // Otherwise, scan through the extent list.
    PartitionSuperPageExtentEntry* entry = root->firstExtent.next;
    while (UNLIKELY(entry != 0)) {
        if (ptr >= entry->superPageBase && ptr < entry->superPagesEnd)
            return true;
        entry = entry->next;
    }

    return false;
}

ALWAYS_INLINE bool partitionPageIsFree(PartitionPage* page)
{
    return (page->numAllocatedSlots == -1);
}

ALWAYS_INLINE PartitionFreelistEntry* partitionPageFreelistHead(PartitionPage* page)
{
    ASSERT((page == &page->bucket->root->seedPage && !page->u.freelistHead) || !partitionPageIsFree(page));
    return page->u.freelistHead;
}

ALWAYS_INLINE void partitionPageSetFreelistHead(PartitionPage* page, PartitionFreelistEntry* newHead)
{
    ASSERT(!partitionPageIsFree(page));
    page->u.freelistHead = newHead;
}

ALWAYS_INLINE void* partitionBucketAlloc(PartitionBucket* bucket)
{
    PartitionPage* page = bucket->activePagesHead;
    ASSERT(page == &bucket->root->seedPage || page->numAllocatedSlots >= 0);
    void* ret = partitionPageFreelistHead(page);
    if (LIKELY(ret != 0)) {
        // If these asserts fire, you probably corrupted memory.
        ASSERT(partitionPointerIsValid(bucket->root, ret));
        ASSERT(partitionPointerToPage(ret));
        PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
        partitionPageSetFreelistHead(page, newHead);
        ASSERT(!partitionPageFreelistHead(page) || partitionPointerIsValid(bucket->root, partitionPageFreelistHead(page)));
        ASSERT(!partitionPageFreelistHead(page) || partitionPointerToPage(partitionPageFreelistHead(page)));
        page->numAllocatedSlots++;
    } else {
        ret = partitionAllocSlowPath(bucket);
    }
#ifndef NDEBUG
    // Fill the region with the uninitialized pattern, and write the cookies.
    size_t bucketSize = partitionBucketSize(bucket);
    memset(ret, kUninitializedByte, bucketSize);
    *(static_cast<uintptr_t*>(ret)) = kCookieValue;
    void* retEnd = static_cast<char*>(ret) + bucketSize;
    *(static_cast<uintptr_t*>(retEnd) - 1) = kCookieValue;
    // The value given to the application is actually just after the cookie.
    ret = static_cast<uintptr_t*>(ret) + 1;
#endif
    return ret;
}

ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    size = partitionCookieSizeAdjustAdd(size);
    ASSERT(root->initialized);
    size_t index = size >> kBucketShift;
    ASSERT(index < root->numBuckets);
    ASSERT(size == index << kBucketShift);
    PartitionBucket* bucket = &root->buckets()[index];
    return partitionBucketAlloc(bucket);
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
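
// To trace the bucket selection (release build, 64-bit, illustrative size):
// partitionAlloc(root, 24) computes index = 24 >> 3 = 3 and allocates from
// root->buckets()[3], whose slots are exactly 24 bytes. In debug builds the
// size is first grown by two cookie words, so the same request maps to a
// larger bucket.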

ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page)
{
    // If these asserts fire, you probably corrupted memory.
#ifndef NDEBUG
    size_t bucketSize = partitionBucketSize(page->bucket);
    void* ptrEnd = static_cast<char*>(ptr) + bucketSize;
    ASSERT(*(static_cast<uintptr_t*>(ptr)) == kCookieValue);
    ASSERT(*(static_cast<uintptr_t*>(ptrEnd) - 1) == kCookieValue);
    memset(ptr, kFreedByte, bucketSize);
#endif
    ASSERT(!partitionPageFreelistHead(page) || partitionPointerIsValid(page->bucket->root, partitionPageFreelistHead(page)));
    ASSERT(!partitionPageFreelistHead(page) || partitionPointerToPage(partitionPageFreelistHead(page)));
    RELEASE_ASSERT(ptr != partitionPageFreelistHead(page)); // Catches an immediate double free.
    ASSERT(!partitionPageFreelistHead(page) || ptr != partitionFreelistMask(partitionPageFreelistHead(page)->next)); // Look for double free one level deeper in debug.
    PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
    entry->next = partitionFreelistMask(partitionPageFreelistHead(page));
    partitionPageSetFreelistHead(page, entry);
    --page->numAllocatedSlots;
    if (UNLIKELY(page->numAllocatedSlots <= 0))
        partitionFreeSlowPath(page);
}

ALWAYS_INLINE void partitionFree(void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ptr = partitionCookieFreePointerAdjust(ptr);
    PartitionPage* page = partitionPointerToPage(ptr);
    ASSERT(partitionPointerIsValid(page->bucket->root, ptr));
    partitionFreeWithPage(ptr, page);
#endif
}

ALWAYS_INLINE void* partitionAllocGeneric(PartitionRoot* root, size_t size)
{
    RELEASE_ASSERT(size <= QuantizedAllocation::kMaxUnquantizedAllocation);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    void* result = malloc(size);
    RELEASE_ASSERT(result);
    return result;
#else
    ASSERT(root->initialized);
    size = QuantizedAllocation::quantizedSize(size);
    size_t realSize = partitionCookieSizeAdjustAdd(size);
    if (LIKELY(realSize <= root->maxAllocation)) {
        spinLockLock(&root->lock);
        void* ret = partitionAlloc(root, size);
        spinLockUnlock(&root->lock);
        return ret;
    }
    return WTF::fastMalloc(size);
#endif
}

ALWAYS_INLINE void partitionFreeGeneric(PartitionRoot* root, void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    free(ptr);
#else
    ASSERT(root->initialized);
    if (LIKELY(partitionPointerIsValid(root, ptr))) {
        ptr = partitionCookieFreePointerAdjust(ptr);
        PartitionPage* page = partitionPointerToPage(ptr);
        spinLockLock(&root->lock);
        partitionFreeWithPage(ptr, page);
        spinLockUnlock(&root->lock);
        return;
    }
    return WTF::fastFree(ptr);
#endif
}
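
// A sketch of the generic APIs, which quantize arbitrary sizes, take the
// partition spin lock, and transparently fall back to
// WTF::fastMalloc() / WTF::fastFree() for sizes above the partition's
// maximum (the sizes here are illustrative):
//
//   void* buf = partitionAllocGeneric(allocator.root(), 1300);
//   buf = partitionReallocGeneric(allocator.root(), buf, 6000);
//   partitionFreeGeneric(allocator.root(), buf);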

// N (or more accurately, N - sizeof(void*)) represents the largest size in
// bytes that will be handled by a PartitionAllocator.
// Attempts to partitionAlloc() more than this amount will fail. Attempts to
// partitionAllocGeneric() more than this amount will succeed but will be
// transparently serviced by the system allocator.
template <size_t N>
class PartitionAllocator {
public:
    static const size_t kMaxAllocation = N - kAllocationGranularity;
    static const size_t kNumBuckets = N / kAllocationGranularity;
    void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); }
    bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
    ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }
private:
    PartitionRoot m_partitionRoot;
    PartitionBucket m_actualBuckets[kNumBuckets];
};
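
// For instance, PartitionAllocator<4096> on a 64-bit system yields
// kNumBuckets == 512 and kMaxAllocation == 4088 (the "typically 4088 bytes"
// limit mentioned in the description above). m_actualBuckets sits immediately
// after m_partitionRoot, which is what makes the "this + 1" arithmetic in
// PartitionRoot::buckets() valid.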

} // namespace WTF

using WTF::PartitionAllocator;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;

#endif // WTF_PartitionAlloc_h