1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
6 #define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
7
8 // DESCRIPTION
9 // PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
// PartitionRootGeneric::Free() are approximately analogous to malloc() and
11 // free().
12 //
13 // The main difference is that a PartitionRoot / PartitionRootGeneric object
14 // must be supplied to these functions, representing a specific "heap partition"
15 // that will be used to satisfy the allocation. Different partitions are
16 // guaranteed to exist in separate address spaces, including being separate from
17 // the main system heap. If the contained objects are all freed, physical memory
18 // is returned to the system but the address space remains reserved.
19 // See PartitionAlloc.md for other security properties PartitionAlloc provides.
20 //
21 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
22 // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
23 // minimize the instruction count to the fullest extent possible, the
24 // PartitionRoot is really just a header adjacent to other data areas provided
25 // by the allocator class.
26 //
27 // The PartitionRoot::Alloc() variant of the API has the following caveats:
28 // - Allocations and frees against a single partition must be single threaded.
29 // - Allocations must not exceed a max size, chosen at compile-time via a
30 // templated parameter to PartitionAllocator.
31 // - Allocation sizes must be aligned to the system pointer size.
32 // - Allocations are bucketed exactly according to size.
33 //
34 // And for PartitionRootGeneric::Alloc():
35 // - Multi-threaded use against a single partition is ok; locking is handled.
36 // - Allocations of any arbitrary size can be handled (subject to a limit of
37 // INT_MAX bytes for security reasons).
38 // - Bucketing is by approximate size, for example an allocation of 4000 bytes
39 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
40 // keep worst-case waste to ~10%.
41 //
42 // The allocators are designed to be extremely fast, thanks to the following
43 // properties and design:
// - Just two single (reasonably predictable) branches in the hot / fast path
45 // for both allocating and (significantly) freeing.
46 // - A minimal number of operations in the hot / fast path, with the slow paths
47 // in separate functions, leading to the possibility of inlining.
48 // - Each partition page (which is usually multiple physical pages) has a
49 // metadata structure which allows fast mapping of free() address to an
50 // underlying bucket.
51 // - Supports a lock-free API for fast performance in single-threaded cases.
52 // - The freelist for a given bucket is split across a number of partition
53 // pages, enabling various simple tricks to try and minimize fragmentation.
54 // - Fine-grained bucket sizes leading to less waste and better packing.
55 //
56 // The following security properties could be investigated in the future:
57 // - Per-object bucketing (instead of per-size) is mostly available at the API,
58 // but not used yet.
59 // - No randomness of freelist entries or bucket position.
60 // - Better checking for wild pointers in free().
61 // - Better freelist masking function to guarantee fault on 32-bit.
62
63 #include <limits.h>
64 #include <string.h>
65
66 #include "build/build_config.h"
67 #include "third_party/base/allocator/partition_allocator/page_allocator.h"
68 #include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
69 #include "third_party/base/allocator/partition_allocator/partition_bucket.h"
70 #include "third_party/base/allocator/partition_allocator/partition_cookie.h"
71 #include "third_party/base/allocator/partition_allocator/partition_page.h"
72 #include "third_party/base/allocator/partition_allocator/partition_root_base.h"
73 #include "third_party/base/allocator/partition_allocator/spin_lock.h"
74 #include "third_party/base/base_export.h"
75 #include "third_party/base/bits.h"
76 #include "third_party/base/compiler_specific.h"
77 #include "third_party/base/logging.h"
78 #include "third_party/base/stl_util.h"
79 #include "third_party/base/sys_byteorder.h"
80
81 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
82 #include <stdlib.h>
83 #endif
84
85 // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
86 // size as other alloc code.
87 #define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
88 if (size > kGenericMaxDirectMapped) { \
89 if (flags & PartitionAllocReturnNull) { \
90 return nullptr; \
91 } \
92 CHECK(false); \
93 }
94
95 namespace pdfium {
96 namespace base {
97
98 class PartitionStatsDumper;
99
// Bit flags accepted by PartitionRoot*::PurgeMemory(); combine with OR.
enum PartitionPurgeFlags {
  // Decommitting the ring list of empty pages is reasonably fast.
  PartitionPurgeDecommitEmptyPages = 1 << 0,
  // Discarding unused system pages is slower, because it involves walking all
  // freelists in all active partition pages of all buckets >= system page
  // size. It often frees a similar amount of memory to decommitting the empty
  // pages, though.
  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
109
// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
//
// A size-specific partition: allocations are single-threaded, bounded by the
// |maximum_allocation| passed to Init(), and bucketed exactly by size (see
// AllocFlags() below).
struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
  PartitionRoot();
  ~PartitionRoot() override;
  // This references the buckets OFF the edge of this struct. All uses of
  // PartitionRoot must have the bucket array come right after.
  //
  // The PartitionAlloc templated class ensures the following is correct.
  ALWAYS_INLINE internal::PartitionBucket* buckets() {
    return reinterpret_cast<internal::PartitionBucket*>(this + 1);
  }
  ALWAYS_INLINE const internal::PartitionBucket* buckets() const {
    return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
  }

  // Must be called before the first allocation. |bucket_count| is the number
  // of buckets in the adjacent array; |maximum_allocation| bounds the sizes
  // AllocFlags() accepts (enforced via DCHECK).
  void Init(size_t bucket_count, size_t maximum_allocation);

  // Allocates |size| bytes; |type_name| is reported to allocation hooks.
  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
  // As Alloc(), but honoring PartitionAlloc* |flags| (e.g.
  // PartitionAllocReturnNull).
  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);

  // Releases memory according to the PartitionPurgeFlags bitmask in |flags|.
  void PurgeMemory(int flags) override;

  // Reports this partition's memory statistics to |dumper|.
  void DumpStats(const char* partition_name,
                 bool is_light_dump,
                 PartitionStatsDumper* dumper);
};
136
// Never instantiate a PartitionRootGeneric directly, instead use
// PartitionAllocatorGeneric.
//
// A partition handling arbitrary allocation sizes; safe for multi-threaded
// use (allocation and free take |lock|). Buckets are chosen by approximate
// size via PartitionGenericSizeToBucket().
struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase {
  PartitionRootGeneric();
  ~PartitionRootGeneric() override;
  // Guards bucket/page state during Alloc()/Free().
  subtle::SpinLock lock;
  // Some pre-computed constants.
  size_t order_index_shifts[kBitsPerSizeT + 1] = {};
  size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation
  // sizes. It is one flat array instead of a 2D array because in the 2D
  // world, we'd need to index array[blah][max+1] which risks undefined
  // behavior.
  internal::PartitionBucket*
      bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
          {};
  internal::PartitionBucket buckets[kGenericNumBuckets] = {};

  // Public API.

  // Must be called before the first allocation.
  void Init();

  // Allocates |size| bytes; |type_name| is reported to allocation hooks.
  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
  // As Alloc(), but honoring PartitionAlloc* |flags| (e.g.
  // PartitionAllocReturnNull).
  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
  // Frees |ptr|; nullptr is a no-op.
  ALWAYS_INLINE void Free(void* ptr);

  NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
  // Overload that may return nullptr if reallocation isn't possible. In this
  // case, |ptr| remains valid.
  NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);

  // Returns the number of usable bytes a request for |size| bytes would
  // actually be granted (after bucket rounding).
  ALWAYS_INLINE size_t ActualSize(size_t size);

  // Releases memory according to the PartitionPurgeFlags bitmask in |flags|.
  void PurgeMemory(int flags) override;

  // Reports this partition's memory statistics to |partition_stats_dumper|.
  void DumpStats(const char* partition_name,
                 bool is_light_dump,
                 PartitionStatsDumper* partition_stats_dumper);
};
176
// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
  size_t total_mmapped_bytes;    // Total bytes mmapped from the system.
  size_t total_committed_bytes;  // Total size of committed pages.
  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
  size_t total_active_bytes;     // Total active bytes in the partition.
  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
  size_t total_discardable_bytes;    // Total bytes that could be discarded.
};
187
// Struct used to retrieve memory statistics about a partition bucket. Used by
// PartitionStatsDumper implementation.
struct PartitionBucketMemoryStats {
  bool is_valid;       // Whether these stats are valid.
  bool is_direct_map;  // True if this is a direct mapping; size will not be
                       // unique.
  uint32_t bucket_slot_size;     // The size of the slot in bytes.
  uint32_t allocated_page_size;  // Total size the partition page allocated from
                                 // the system.
  uint32_t active_bytes;         // Total active bytes used in the bucket.
  uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
  uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
  uint32_t discardable_bytes;    // Total bytes that could be discarded.
  uint32_t num_full_pages;       // Number of pages with all slots allocated.
  uint32_t num_active_pages;     // Number of pages that have at least one
                                 // provisioned slot.
  uint32_t num_empty_pages;      // Number of pages that are empty
                                 // but not decommitted.
  uint32_t num_decommitted_pages;  // Number of pages that are empty
                                   // and decommitted.
};
209
// Interface that is passed to PartitionDumpStats and
// PartitionDumpStatsGeneric for using the memory statistics.
class BASE_EXPORT PartitionStatsDumper {
 public:
  // Called to dump total memory used by partition, once per partition.
  virtual void PartitionDumpTotals(const char* partition_name,
                                   const PartitionMemoryStats*) = 0;

  // Called to dump stats about buckets, for each bucket.
  virtual void PartitionsDumpBucketStats(const char* partition_name,
                                         const PartitionBucketMemoryStats*) = 0;
};
222
223 BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
224
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
//
// All hook pointers are stored in std::atomic so the hot allocation paths can
// read them without taking |set_hooks_lock_|.
class BASE_EXPORT PartitionAllocHooks {
 public:
  // Log allocation and free events.
  typedef void AllocationObserverHook(void* address,
                                      size_t size,
                                      const char* type_name);
  typedef void FreeObserverHook(void* address);

  // If it returns true, the allocation has been overridden with the pointer in
  // *out.
  typedef bool AllocationOverrideHook(void** out,
                                      int flags,
                                      size_t size,
                                      const char* type_name);
  // If it returns true, then the allocation was overridden and has been freed.
  typedef bool FreeOverrideHook(void* address);
  // If it returns true, the underlying allocation is overridden and *out holds
  // the size of the underlying allocation.
  typedef bool ReallocOverrideHook(size_t* out, void* address);

  // To unhook, call Set*Hooks with nullptrs.
  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
                               FreeObserverHook* free_hook);
  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                               FreeOverrideHook* free_hook,
                               ReallocOverrideHook realloc_hook);

  // Helper method to check whether hooks are enabled. This is an optimization
  // so that if a function needs to call observer and override hooks in two
  // different places this value can be cached and only loaded once.
  static bool AreHooksEnabled() {
    return hooks_enabled_.load(std::memory_order_relaxed);
  }

  static void AllocationObserverHookIfEnabled(void* address,
                                              size_t size,
                                              const char* type_name);
  static bool AllocationOverrideHookIfEnabled(void** out,
                                              int flags,
                                              size_t size,
                                              const char* type_name);

  static void FreeObserverHookIfEnabled(void* address);
  static bool FreeOverrideHookIfEnabled(void* address);

  static void ReallocObserverHookIfEnabled(void* old_address,
                                           void* new_address,
                                           size_t size,
                                           const char* type_name);
  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);

 private:
  // Single bool that is used to indicate whether observer or allocation hooks
  // are set to reduce the numbers of loads required to check whether hooking is
  // enabled.
  static std::atomic<bool> hooks_enabled_;

  // Lock used to synchronize Set*Hooks calls.
  static subtle::SpinLock set_hooks_lock_;

  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
  static std::atomic<FreeObserverHook*> free_observer_hook_;

  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
  static std::atomic<FreeOverrideHook*> free_override_hook_;
  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
294
// Allocates |size| bytes with no flags set; see AllocFlags() for details.
ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
  return AllocFlags(/* flags= */ 0, size, type_name);
}
298
// Implements the size-specific allocation path. When a memory tool (e.g.
// ASan) replaces the allocator, defers to the system malloc so the tool can
// instrument the allocation.
ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
                                              size_t size,
                                              const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
  void* result = malloc(size);
  CHECK(result);
  return result;
#else
  DCHECK(max_allocation == 0 || size <= max_allocation);
  void* result;
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  if (UNLIKELY(hooks_enabled)) {
    // An override hook may satisfy the allocation itself; if it does, only
    // notify observers and return.
    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
                                                             size, type_name)) {
      PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
                                                           type_name);
      return result;
    }
  }
  size_t requested_size = size;
  // Reserve room for the allocation cookie (see partition_cookie.h).
  size = internal::PartitionCookieSizeAdjustAdd(size);
  DCHECK(initialized);
  // Sizes must be multiples of the bucket granularity, so the shift maps each
  // adjusted size to exactly one bucket (verified by the DCHECKs below).
  size_t index = size >> kBucketShift;
  DCHECK(index < num_buckets);
  DCHECK(size == index << kBucketShift);
  internal::PartitionBucket* bucket = &buckets()[index];
  result = AllocFromBucket(bucket, flags, size);
  if (UNLIKELY(hooks_enabled)) {
    // Observers are told the caller-requested size, not the adjusted one.
    PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
                                                         type_name);
  }
  return result;
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
334
// Returns true if PartitionAllocGetSize() may be used. When a memory tool
// replaces the allocator with plain malloc(), slot sizes are not available.
ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return false;
#else
  return true;
#endif
}
342
// Returns the usable size of the allocation at |ptr|: the slot size of its
// bucket, minus cookie overhead. Only valid when
// PartitionAllocSupportsGetSize() is true.
ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
  // No need to lock here. Only |ptr| being freed by another thread could
  // cause trouble, and the caller is responsible for that not happening.
  DCHECK(PartitionAllocSupportsGetSize());
  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
  // TODO(palmer): See if we can afford to make this a CHECK.
  DCHECK(internal::PartitionRootBase::IsValidPage(page));
  size_t size = page->bucket->slot_size;
  return internal::PartitionCookieSizeAdjustSubtract(size);
}
354
// Frees an allocation made with PartitionRoot::Alloc(). The owning page (and
// hence bucket) is recovered from the pointer itself, so no root is needed.
ALWAYS_INLINE void PartitionFree(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  // TODO(palmer): Check ptr alignment before continuing. Shall we do the check
  // inside PartitionCookieFreePointerAdjust?
  if (PartitionAllocHooks::AreHooksEnabled()) {
    PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
    // An override hook returning true means it already released the memory.
    if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
      return;
  }

  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
  // TODO(palmer): See if we can afford to make this a CHECK.
  DCHECK(internal::PartitionRootBase::IsValidPage(page));
  page->Free(ptr);
#endif
}
374
// Maps an (already cookie-adjusted) allocation |size| to its bucket in
// |root|. Buckets are organized by "order" (the position of the most
// significant bit of |size|) plus a few bits below it; any remaining low bits
// bump the request up to the next bucket.
ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket(
    PartitionRootGeneric* root,
    size_t size) {
  size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
  // The order index is simply the next few bits after the most significant bit.
  size_t order_index = (size >> root->order_index_shifts[order]) &
                       (kGenericNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
  size_t sub_order_index = size & root->order_sub_index_masks[order];
  internal::PartitionBucket* bucket =
      root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
                           order_index + !!sub_order_index];
  CHECK(bucket);
  // A bucket's slot must be large enough for the request, and all bucket
  // sizes are multiples of the smallest bucket size.
  DCHECK(!bucket->slot_size || bucket->slot_size >= size);
  DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
  return bucket;
}
392
// Implements PartitionRootGeneric::Alloc()/AllocFlags(). Thread-safe: the
// bucket allocation runs under |root->lock|; hook dispatch happens outside it.
ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
                                               int flags,
                                               size_t size,
                                               const char* type_name) {
  DCHECK(flags < PartitionAllocLastFlag << 1);

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Let the memory tool (e.g. ASan) service the allocation directly.
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
  const bool zero_fill = flags & PartitionAllocZeroFill;
  void* result = zero_fill ? calloc(1, size) : malloc(size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  DCHECK(root->initialized);
  // Only SizeSpecificPartitionAllocator should use max_allocation.
  DCHECK(root->max_allocation == 0);
  void* result;
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  if (UNLIKELY(hooks_enabled)) {
    // An override hook may satisfy the allocation itself; if it does, only
    // notify observers and return.
    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
                                                             size, type_name)) {
      PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
                                                           type_name);
      return result;
    }
  }
  size_t requested_size = size;
  // Reserve room for the allocation cookie (see partition_cookie.h).
  size = internal::PartitionCookieSizeAdjustAdd(size);
  internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
  {
    // Keep the critical section minimal: only the bucket allocation locks.
    subtle::SpinLock::Guard guard(root->lock);
    result = root->AllocFromBucket(bucket, flags, size);
  }
  if (UNLIKELY(hooks_enabled)) {
    // Observers are told the caller-requested size, not the adjusted one.
    PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
                                                         type_name);
  }

  return result;
#endif
}
434
// Allocates |size| bytes with no flags set; see PartitionAllocGenericFlags().
ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
                                                const char* type_name) {
  return PartitionAllocGenericFlags(this, /* flags= */ 0, size, type_name);
}
439
// Forwards to the free function implementing the generic allocation path.
ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags,
                                                     size_t size,
                                                     const char* type_name) {
  return PartitionAllocGenericFlags(this, flags, size, type_name);
}
445
// Frees |ptr| back to this partition. nullptr is a no-op. The partition lock
// is held only around the actual free; hook dispatch stays outside it.
ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  DCHECK(initialized);

  if (UNLIKELY(!ptr))
    return;

  if (PartitionAllocHooks::AreHooksEnabled()) {
    PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
    // An override hook returning true means it already released the memory.
    if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
      return;
  }

  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
  // TODO(palmer): See if we can afford to make this a CHECK.
  DCHECK(IsValidPage(page));
  {
    subtle::SpinLock::Guard guard(lock);
    page->Free(ptr);
  }
#endif
}
471
472 BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
473 int flags,
474 void* ptr,
475 size_t new_size,
476 const char* type_name);
477
// Returns the number of usable bytes a request for |size| bytes would
// actually be granted: the chosen bucket's slot size (or direct-map size),
// minus cookie overhead. Oversized requests are returned unchanged.
ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return size;
#else
  DCHECK(initialized);
  size = internal::PartitionCookieSizeAdjustAdd(size);
  internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
  if (LIKELY(!bucket->is_direct_mapped())) {
    size = bucket->slot_size;
  } else if (size > kGenericMaxDirectMapped) {
    // Too large to allocate => return the size unchanged.
  } else {
    size = internal::PartitionBucket::get_direct_map_size(size);
  }
  return internal::PartitionCookieSizeAdjustSubtract(size);
#endif
}
495
496 template <size_t N>
497 class SizeSpecificPartitionAllocator {
498 public:
SizeSpecificPartitionAllocator()499 SizeSpecificPartitionAllocator() {
500 memset(actual_buckets_, 0,
501 sizeof(internal::PartitionBucket) * pdfium::size(actual_buckets_));
502 }
503 ~SizeSpecificPartitionAllocator() = default;
504 static const size_t kMaxAllocation = N - kAllocationGranularity;
505 static const size_t kNumBuckets = N / kAllocationGranularity;
init()506 void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
root()507 ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
508
509 private:
510 PartitionRoot partition_root_;
511 internal::PartitionBucket actual_buckets_[kNumBuckets];
512 };
513
// Owner of a PartitionRootGeneric. Unlike SizeSpecificPartitionAllocator this
// handles arbitrary allocation sizes and multi-threaded use (the root carries
// its own lock).
class BASE_EXPORT PartitionAllocatorGeneric {
 public:
  PartitionAllocatorGeneric();
  ~PartitionAllocatorGeneric();

  // Must be called before the partition is used.
  void init() { partition_root_.Init(); }
  ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }

 private:
  PartitionRootGeneric partition_root_;
};
525
526 } // namespace base
527 } // namespace pdfium
528
529 #endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
530