1 // Copyright 2020 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
7
8 // DESCRIPTION
9 // PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
10 // to malloc() and free().
11 //
12 // The main difference is that a PartitionRoot object must be supplied to these
13 // functions, representing a specific "heap partition" that will be used to
14 // satisfy the allocation. Different partitions are guaranteed to exist in
15 // separate address spaces, including being separate from the main system
16 // heap. If the contained objects are all freed, physical memory is returned to
17 // the system but the address space remains reserved. See PartitionAlloc.md for
18 // other security properties PartitionAlloc provides.
19 //
20 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
21 // PartitionAllocator classes. To minimize the instruction count to the fullest
22 // extent possible, the PartitionRoot is really just a header adjacent to other
23 // data areas provided by the allocator class.
24 //
25 // The constraints for PartitionRoot::Alloc() are:
26 // - Multi-threaded use against a single partition is ok; locking is handled.
27 // - Allocations of any arbitrary size can be handled (subject to a limit of
28 // INT_MAX bytes for security reasons).
29 // - Bucketing is by approximate size; for example, an allocation of 4000 bytes
30 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try to
31 // keep worst-case waste to ~10%.
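//
// A minimal usage sketch (it assumes the PartitionAllocator wrapper declared
// in partition_allocator.h, with init()/root() accessors, since a
// PartitionRoot must never be instantiated directly):
//
//   partition_alloc::PartitionAllocator allocator;
//   allocator.init(PartitionOptions(...));   // See PartitionOptions below.
//   auto* root = allocator.root();
//   void* object = root->Alloc(4000, "MyType");  // Lands in a ~4096-byte bucket.
//   root->Free(object);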
32
33 #include <algorithm>
34 #include <atomic>
35 #include <cstddef>
36 #include <cstdint>
37
38 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
39 #include "base/allocator/partition_allocator/allocation_guard.h"
40 #include "base/allocator/partition_allocator/chromecast_buildflags.h"
41 #include "base/allocator/partition_allocator/freeslot_bitmap.h"
42 #include "base/allocator/partition_allocator/page_allocator.h"
43 #include "base/allocator/partition_allocator/partition_address_space.h"
44 #include "base/allocator/partition_allocator/partition_alloc-inl.h"
45 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
46 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
47 #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
48 #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
49 #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
50 #include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
51 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
52 #include "base/allocator/partition_allocator/partition_alloc_check.h"
53 #include "base/allocator/partition_allocator/partition_alloc_config.h"
54 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
55 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
56 #include "base/allocator/partition_allocator/partition_alloc_hooks.h"
57 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
58 #include "base/allocator/partition_allocator/partition_bucket_lookup.h"
59 #include "base/allocator/partition_allocator/partition_cookie.h"
60 #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
61 #include "base/allocator/partition_allocator/partition_freelist_entry.h"
62 #include "base/allocator/partition_allocator/partition_lock.h"
63 #include "base/allocator/partition_allocator/partition_oom.h"
64 #include "base/allocator/partition_allocator/partition_page.h"
65 #include "base/allocator/partition_allocator/partition_ref_count.h"
66 #include "base/allocator/partition_allocator/pkey.h"
67 #include "base/allocator/partition_allocator/reservation_offset_table.h"
68 #include "base/allocator/partition_allocator/tagging.h"
69 #include "base/allocator/partition_allocator/thread_cache.h"
70 #include "build/build_config.h"
71
72 #if BUILDFLAG(USE_STARSCAN)
73 #include "base/allocator/partition_allocator/starscan/pcscan.h"
74 #endif
75
76 // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
77 // size as other alloc code.
78 #define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
79 if (size > partition_alloc::internal::MaxDirectMapped()) { \
80 if (flags & AllocFlags::kReturnNull) { \
81 return nullptr; \
82 } \
83 PA_CHECK(false); \
84 }
85
86 namespace partition_alloc::internal {
87
88 // We want this size to be big enough that we have time to start up other
89 // scripts _before_ we wrap around.
90 static constexpr size_t kAllocInfoSize = 1 << 24;
91
92 struct AllocInfo {
93 std::atomic<size_t> index{0};
94 struct {
95 uintptr_t addr;
96 size_t size;
97 } allocs[kAllocInfoSize] = {};
98 };
99
100 #if BUILDFLAG(RECORD_ALLOC_INFO)
101 extern AllocInfo g_allocs;
102
103 void RecordAllocOrFree(uintptr_t addr, size_t size);
104 #endif // BUILDFLAG(RECORD_ALLOC_INFO)
105 } // namespace partition_alloc::internal
106
107 namespace partition_alloc {
108
109 namespace internal {
110 // Avoid including partition_address_space.h from this .h file, by moving the
111 // call to IsManagedByPartitionAllocBRPPool into the .cc file.
112 #if BUILDFLAG(PA_DCHECK_IS_ON)
113 PA_COMPONENT_EXPORT(PARTITION_ALLOC)
114 void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address);
115 #else
116 PA_ALWAYS_INLINE void DCheckIfManagedByPartitionAllocBRPPool(
117 uintptr_t address) {}
118 #endif
119
120 #if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
121 class PartitionRootEnumerator;
122 #endif
123
124 } // namespace internal
125
126 // Bit flag constants used to purge memory. See PartitionRoot::PurgeMemory.
127 //
128 // In order to support bit operations like `flag_a | flag_b`, the old-fashioned
129 // enum (+ surrounding named struct) is used instead of enum class.
130 struct PurgeFlags {
131 enum : int {
132 // Decommitting the ring list of empty slot spans is reasonably fast.
133 kDecommitEmptySlotSpans = 1 << 0,
134 // Discarding unused system pages is slower, because it involves walking all
135 // freelists in all active slot spans of all buckets >= system page
136 // size. It often frees a similar amount of memory to decommitting the empty
137 // slot spans, though.
138 kDiscardUnusedSystemPages = 1 << 1,
139 // Aggressively reclaim memory. This is meant to be used in low-memory
140 // situations, not for periodic memory reclaiming.
141 kAggressiveReclaim = 1 << 2,
142 };
143 };
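// For instance, a typical periodic reclaim could combine the two cheaper
// flags (a sketch; PurgeMemory() is declared on PartitionRoot below):
//
//   root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
//                     PurgeFlags::kDiscardUnusedSystemPages);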
144
145 // Options struct used to configure PartitionRoot and PartitionAllocator.
146 struct PartitionOptions {
147 enum class AlignedAlloc : uint8_t {
148 // By default all allocations will be aligned to `kAlignment`,
149 // likely to be 8B or 16B depending on platforms and toolchains.
150 // AlignedAlloc() allows enforcing a higher alignment.
151 // This option determines whether it is supported for the partition.
152 // Allowing AlignedAlloc() comes at a cost of disallowing extras in front
153 // of the allocation.
154 kDisallowed,
155 kAllowed,
156 };
157
158 enum class ThreadCache : uint8_t {
159 kDisabled,
160 kEnabled,
161 };
162
163 enum class Quarantine : uint8_t {
164 kDisallowed,
165 kAllowed,
166 };
167
168 enum class Cookie : uint8_t {
169 kDisallowed,
170 kAllowed,
171 };
172
173 enum class BackupRefPtr : uint8_t {
174 kDisabled,
175 kEnabled,
176 };
177
178 enum class BackupRefPtrZapping : uint8_t {
179 kDisabled,
180 kEnabled,
181 };
182
183 enum class AddDummyRefCount : uint8_t {
184 kDisabled,
185 kEnabled,
186 };
187
188 enum class UseConfigurablePool : uint8_t {
189 kNo,
190 kIfAvailable,
191 };
192
193 // Constructor to suppress aggregate initialization.
194 constexpr PartitionOptions(
195 AlignedAlloc aligned_alloc,
196 ThreadCache thread_cache,
197 Quarantine quarantine,
198 Cookie cookie,
199 BackupRefPtr backup_ref_ptr,
200 BackupRefPtrZapping backup_ref_ptr_zapping,
201 UseConfigurablePool use_configurable_pool,
202 AddDummyRefCount add_dummy_ref_count = AddDummyRefCount::kDisabled
203 #if BUILDFLAG(ENABLE_PKEYS)
204 ,
205 int pkey = internal::kDefaultPkey
206 #endif
207 )
208 : aligned_alloc(aligned_alloc),
209 thread_cache(thread_cache),
210 quarantine(quarantine),
211 cookie(cookie),
212 backup_ref_ptr(backup_ref_ptr),
213 backup_ref_ptr_zapping(backup_ref_ptr_zapping),
214 use_configurable_pool(use_configurable_pool)
215 #if BUILDFLAG(ENABLE_PKEYS)
216 ,
217 pkey(pkey)
218 #endif
219 {
220 }
221
222 AlignedAlloc aligned_alloc;
223 ThreadCache thread_cache;
224 Quarantine quarantine;
225 Cookie cookie;
226 BackupRefPtr backup_ref_ptr;
227 BackupRefPtrZapping backup_ref_ptr_zapping;
228 UseConfigurablePool use_configurable_pool;
229 AddDummyRefCount add_dummy_ref_count = AddDummyRefCount::kDisabled;
230 #if BUILDFLAG(ENABLE_PKEYS)
231 int pkey;
232 #endif
233 };
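// A sketch of constructing options for a plain, thread-cache-less partition,
// using only the enumerators and the constructor declared above (the chosen
// values are purely illustrative):
//
//   PartitionOptions opts(
//       PartitionOptions::AlignedAlloc::kDisallowed,
//       PartitionOptions::ThreadCache::kDisabled,
//       PartitionOptions::Quarantine::kDisallowed,
//       PartitionOptions::Cookie::kAllowed,
//       PartitionOptions::BackupRefPtr::kDisabled,
//       PartitionOptions::BackupRefPtrZapping::kDisabled,
//       PartitionOptions::UseConfigurablePool::kNo);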
234
235 // Never instantiate a PartitionRoot directly, instead use
236 // PartitionAllocator.
237 template <bool thread_safe>
238 struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
239 using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
240 using Page = internal::PartitionPage<thread_safe>;
241 using Bucket = internal::PartitionBucket<thread_safe>;
242 using FreeListEntry = internal::PartitionFreelistEntry;
243 using SuperPageExtentEntry =
244 internal::PartitionSuperPageExtentEntry<thread_safe>;
245 using DirectMapExtent = internal::PartitionDirectMapExtent<thread_safe>;
246 #if BUILDFLAG(USE_STARSCAN)
247 using PCScan = internal::PCScan;
248 #endif
249
250 enum class QuarantineMode : uint8_t {
251 kAlwaysDisabled,
252 kDisabledByDefault,
253 kEnabled,
254 };
255
256 enum class ScanMode : uint8_t {
257 kDisabled,
258 kEnabled,
259 };
260
261 enum class BucketDistribution : uint8_t { kDefault, kDenser };
262
263 // Flags accessed on fast paths.
264 //
265 // Careful! PartitionAlloc's performance is sensitive to its layout. Please
266 // put the fast-path objects in the struct below, and the other ones after
267 // the union below.
268 struct Flags {
269 // Defines whether objects should be quarantined for this root.
270 QuarantineMode quarantine_mode;
271
272 // Defines whether the root should be scanned.
273 ScanMode scan_mode;
274
275 // It's important to default to the 'default' distribution, otherwise a
276 // switch from 'dense' -> 'default' would leave some buckets with dirty
277 // memory forever, since no memory would be allocated from these, their
278 // freelist would typically not be empty, making these unreclaimable.
279 BucketDistribution bucket_distribution = BucketDistribution::kDefault;
280
281 bool with_thread_cache = false;
282
283 bool allow_aligned_alloc;
284 bool allow_cookie;
285 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
286 bool brp_enabled_;
287 bool brp_zapping_enabled_;
288 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
289 bool mac11_malloc_size_hack_enabled_ = false;
290 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
291 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
292 bool use_configurable_pool;
293
294 #if BUILDFLAG(ENABLE_PKEYS)
295 int pkey;
296 #endif
297
298 #if PA_CONFIG(EXTRAS_REQUIRED)
299 uint32_t extras_size;
300 uint32_t extras_offset;
301 #else
302 // Teach the compiler that code can be optimized in builds that use no
303 // extras.
304 static inline constexpr uint32_t extras_size = 0;
305 static inline constexpr uint32_t extras_offset = 0;
306 #endif // PA_CONFIG(EXTRAS_REQUIRED)
307 };
308
309 // Read-mostly flags.
310 union {
311 Flags flags;
312
313 // The flags above are accessed for all (de)allocations, and are mostly
314 // read-only. They should not share a cacheline with the data below, which
315 // is only touched when the lock is taken.
316 uint8_t one_cacheline[internal::kPartitionCachelineSize];
317 };
318
319 // Not used on the fastest path (thread cache allocations), but on the fast
320 // path of the central allocator.
321 static_assert(thread_safe, "Only the thread-safe root is supported.");
322 ::partition_alloc::internal::Lock lock_;
323
324 Bucket buckets[internal::kNumBuckets] = {};
325 Bucket sentinel_bucket{};
326
327 // All fields below this comment are not accessed on the fast path.
328 bool initialized = false;
329
330 // Bookkeeping.
331 // - total_size_of_super_pages - total virtual address space for normal bucket
332 // super pages
333 // - total_size_of_direct_mapped_pages - total virtual address space for
334 // direct-map regions
335 // - total_size_of_committed_pages - total committed pages for slots (doesn't
336 // include metadata, bitmaps (if any), or any data outside of the regions
337 // described by the first two entries above)
338 // Invariant: total_size_of_allocated_bytes <=
339 // total_size_of_committed_pages <
340 // total_size_of_super_pages +
341 // total_size_of_direct_mapped_pages.
342 // Invariant: total_size_of_committed_pages <= max_size_of_committed_pages.
343 // Invariant: total_size_of_allocated_bytes <= max_size_of_allocated_bytes.
344 // Invariant: max_size_of_allocated_bytes <= max_size_of_committed_pages.
345 // Since all operations on the atomic variables have relaxed semantics, we
346 // don't check these invariants with DCHECKs.
347 std::atomic<size_t> total_size_of_committed_pages{0};
348 std::atomic<size_t> max_size_of_committed_pages{0};
349 std::atomic<size_t> total_size_of_super_pages{0};
350 std::atomic<size_t> total_size_of_direct_mapped_pages{0};
351 size_t total_size_of_allocated_bytes PA_GUARDED_BY(lock_) = 0;
352 size_t max_size_of_allocated_bytes PA_GUARDED_BY(lock_) = 0;
353 // Atomic, because system calls can be made without the lock held.
354 std::atomic<uint64_t> syscall_count{};
355 std::atomic<uint64_t> syscall_total_time_ns{};
356 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
357 std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
358 std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
359 std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
360 std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
361 #endif
362 // Slot span memory which has been provisioned, and is currently unused as
363 // it's part of an empty SlotSpan. This is not clean memory, since it has
364 // been used for an allocation and/or contains freelist entries. It might
365 // also have been moved to swap. Note that all this memory
366 // can be decommitted at any time.
367 size_t empty_slot_spans_dirty_bytes PA_GUARDED_BY(lock_) = 0;
368
369 // Only tolerate up to |total_size_of_committed_pages >>
370 // max_empty_slot_spans_dirty_bytes_shift| dirty bytes in empty slot
371 // spans. That is, the default value of 3 tolerates up to 1/8. Since
372 // |empty_slot_spans_dirty_bytes| is never strictly larger than
373 // total_size_of_committed_pages, setting this to 0 removes the cap. This is
374 // useful to make tests deterministic and easier to reason about.
375 int max_empty_slot_spans_dirty_bytes_shift = 3;
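// For example, with 64 MiB of committed pages and the default shift of 3, up
// to 64 MiB >> 3 = 8 MiB of dirty bytes are tolerated in empty slot spans
// before the empty slot span ring is shrunk.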
376
377 uintptr_t next_super_page = 0;
378 uintptr_t next_partition_page = 0;
379 uintptr_t next_partition_page_end = 0;
380 SuperPageExtentEntry* current_extent = nullptr;
381 SuperPageExtentEntry* first_extent = nullptr;
382 DirectMapExtent* direct_map_list PA_GUARDED_BY(lock_) = nullptr;
383 SlotSpan*
384 global_empty_slot_span_ring[internal::kMaxFreeableSpans] PA_GUARDED_BY(
385 lock_) = {};
386 int16_t global_empty_slot_span_ring_index PA_GUARDED_BY(lock_) = 0;
387 int16_t global_empty_slot_span_ring_size PA_GUARDED_BY(lock_) =
388 internal::kDefaultEmptySlotSpanRingSize;
389
390 // Integrity check = ~reinterpret_cast<uintptr_t>(this).
391 uintptr_t inverted_self = 0;
392 std::atomic<int> thread_caches_being_constructed_{0};
393
394 bool quarantine_always_for_testing = false;
395
396 PartitionRoot()
397 : flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
398 explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
399 // TODO(tasak): remove ~PartitionRoot() after confirming all tests
400 // don't need ~PartitionRoot().
401 ~PartitionRoot();
402
403 // This will unreserve any space in the pool that the PartitionRoot is
404 // using. This is needed because many tests create and destroy many
405 // PartitionRoots over the lifetime of a process, which can exhaust the
406 // pool and cause tests to fail.
407 void DestructForTesting();
408
409 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
410 void EnableMac11MallocSizeHackForTesting();
411 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
412
413 // Public API
414 //
415 // Allocates out of the given bucket. Properly, this function should probably
416 // be in PartitionBucket, but because the implementation needs to be inlined
417 // for performance, and because it needs to inspect SlotSpanMetadata,
418 // it becomes impossible to have it in PartitionBucket as this causes a
419 // cyclical dependency on SlotSpanMetadata function implementations.
420 //
421 // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
422 // preserves the layering of the includes.
423 void Init(PartitionOptions);
424
425 void EnableThreadCacheIfSupported();
426
427 PA_ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
428 PA_ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
429 // These two functions work unconditionally for normal buckets.
430 // For direct map, they only work for the first super page of a reservation
431 // (see partition_alloc_constants.h for the direct map allocation layout).
432 // In particular, the functions always work for a pointer to the start of a
433 // reservation.
434 PA_ALWAYS_INLINE static PartitionRoot* FromFirstSuperPage(
435 uintptr_t super_page);
436 PA_ALWAYS_INLINE static PartitionRoot* FromAddrInFirstSuperpage(
437 uintptr_t address);
438
439 PA_ALWAYS_INLINE void DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
440 size_t len)
441 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
442 PA_ALWAYS_INLINE void IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
443 size_t len,
444 size_t raw_size)
445 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
446 PA_ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
447 PA_ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
448 PA_ALWAYS_INLINE void DecommitSystemPagesForData(
449 uintptr_t address,
450 size_t length,
451 PageAccessibilityDisposition accessibility_disposition)
452 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
453 PA_ALWAYS_INLINE void RecommitSystemPagesForData(
454 uintptr_t address,
455 size_t length,
456 PageAccessibilityDisposition accessibility_disposition)
457 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
458 PA_ALWAYS_INLINE bool TryRecommitSystemPagesForData(
459 uintptr_t address,
460 size_t length,
461 PageAccessibilityDisposition accessibility_disposition)
462 PA_LOCKS_EXCLUDED(lock_);
463
464 [[noreturn]] PA_NOINLINE void OutOfMemory(size_t size);
465
466 // Returns a pointer aligned on |alignment|, or nullptr.
467 //
468 // |alignment| has to be a power of two and a multiple of sizeof(void*) (as in
469 // posix_memalign() for POSIX systems). The returned pointer may include
470 // padding, and can be passed to |Free()| later.
471 //
472 // NOTE: This is incompatible with anything that adds extras before the
473 // returned pointer, such as ref-count.
474 PA_ALWAYS_INLINE void* AlignedAllocWithFlags(unsigned int flags,
475 size_t alignment,
476 size_t requested_size);
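// An illustrative call (a sketch; it is only valid on a partition created
// with AlignedAlloc::kAllowed, and 64 satisfies the power-of-two and
// multiple-of-sizeof(void*) constraints above):
//
//   void* aligned = root->AlignedAllocWithFlags(AllocFlags::kReturnNull,
//                                               /*alignment=*/64,
//                                               /*requested_size=*/1024);
//   if (aligned) {
//     root->Free(aligned);
//   }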
477
478 // PartitionAlloc supports multiple partitions, and hence multiple callers to
479 // these functions. Setting PA_ALWAYS_INLINE bloats code, and can be
480 // detrimental to performance, for instance if multiple callers are hot (by
481 // increasing cache footprint). Set PA_NOINLINE on the "basic" top-level
482 // functions to mitigate that for "vanilla" callers.
483 PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t requested_size,
484 const char* type_name) PA_MALLOC_ALIGNED;
485 PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlags(unsigned int flags,
486 size_t requested_size,
487 const char* type_name)
488 PA_MALLOC_ALIGNED;
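// For instance, a caller that prefers nullptr over crashing on an oversized
// or failed request can pass kReturnNull, the same flag used by the
// CHECK_MAX_SIZE_OR_RETURN_NULLPTR macro above (a sketch):
//
//   void* maybe = root->AllocWithFlags(AllocFlags::kReturnNull, 1 << 20,
//                                      "MyType");
//   if (!maybe) {
//     // Allocation failed; handle gracefully instead of crashing.
//   }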
489 // Same as |AllocWithFlags()|, but allows specifying |slot_span_alignment|. It
490 // has to be a multiple of partition page size, greater than 0 and no greater
491 // than kMaxSupportedAlignment. If it equals exactly 1 partition page, no
492 // special action is taken as PartitionAlloc naturally guarantees this
493 // alignment; otherwise, a sub-optimal allocation strategy is used to
494 // guarantee the higher-order alignment.
495 PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsInternal(
496 unsigned int flags,
497 size_t requested_size,
498 size_t slot_span_alignment,
499 const char* type_name) PA_MALLOC_ALIGNED;
500 // Same as |AllocWithFlags()|, but bypasses the allocator hooks.
501 //
502 // This is separate from AllocWithFlags() because other callers of
503 // AllocWithFlags() should not have the extra branch checking whether the
504 // hooks should be ignored or not. This is the same reason why |FreeNoHooks()|
505 // exists. However, |AlignedAlloc()| and |Realloc()| have few callers, so
506 // taking the extra branch in the non-malloc() case doesn't hurt. In addition,
507 // for the malloc() case, the compiler correctly removes the branch, since
508 // this is marked |PA_ALWAYS_INLINE|.
509 PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsNoHooks(
510 unsigned int flags,
511 size_t requested_size,
512 size_t slot_span_alignment) PA_MALLOC_ALIGNED;
513
514 PA_NOINLINE void* Realloc(void* ptr,
515 size_t new_size,
516 const char* type_name) PA_MALLOC_ALIGNED;
517 // Overload that may return nullptr if reallocation isn't possible. In this
518 // case, |ptr| remains valid.
519 PA_NOINLINE void* TryRealloc(void* ptr,
520 size_t new_size,
521 const char* type_name) PA_MALLOC_ALIGNED;
522 PA_NOINLINE void* ReallocWithFlags(unsigned int flags,
523 void* ptr,
524 size_t new_size,
525 const char* type_name) PA_MALLOC_ALIGNED;
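// A sketch of growing an allocation without losing it on failure:
//
//   void* grown = root->TryRealloc(ptr, new_size, "MyType");
//   if (!grown) {
//     // |ptr| is still valid and untouched; fall back or propagate OOM.
//   } else {
//     ptr = grown;
//   }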
526 PA_NOINLINE static void Free(void* object);
527 PA_ALWAYS_INLINE static void FreeWithFlags(unsigned int flags, void* object);
528 // Same as |Free()|, bypasses the allocator hooks.
529 PA_ALWAYS_INLINE static void FreeNoHooks(void* object);
530 // Immediately frees the pointer bypassing the quarantine. |slot_start| is the
531 // beginning of the slot that contains |object|.
532 PA_ALWAYS_INLINE void FreeNoHooksImmediate(void* object,
533 SlotSpan* slot_span,
534 uintptr_t slot_start);
535
536 PA_ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
537 // Same as GetUsableSize() except it adjusts the return value for macOS 11
538 // malloc_size() hack.
539 PA_ALWAYS_INLINE static size_t GetUsableSizeWithMac11MallocSizeHack(
540 void* ptr);
541
542 PA_ALWAYS_INLINE PageAccessibilityConfiguration GetPageAccessibility() const;
543 PA_ALWAYS_INLINE PageAccessibilityConfiguration
544 PageAccessibilityWithPkeyIfEnabled(
545 PageAccessibilityConfiguration::Permissions) const;
546
547 PA_ALWAYS_INLINE size_t
548 AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
549 PA_ALWAYS_INLINE size_t
550 AllocationCapacityFromRequestedSize(size_t size) const;
551
552 PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
553
554 // Frees memory from this partition, if possible, by decommitting pages or
555 // even entire slot spans. |flags| is an OR of the PurgeFlags constants above.
556 void PurgeMemory(int flags);
557
558 // Reduces the size of the empty slot spans ring, until the dirty size is <=
559 // |limit|.
560 void ShrinkEmptySlotSpansRing(size_t limit)
561 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
562 // The empty slot span ring starts "small" and can be enlarged later. This
563 // improves performance by performing fewer system calls, at the cost of more
564 // memory usage.
565 void EnableLargeEmptySlotSpanRing() {
566 ::partition_alloc::internal::ScopedGuard locker{lock_};
567 global_empty_slot_span_ring_size = internal::kMaxFreeableSpans;
568 }
569
570 void DumpStats(const char* partition_name,
571 bool is_light_dump,
572 PartitionStatsDumper* partition_stats_dumper);
573
574 static void DeleteForTesting(PartitionRoot* partition_root);
575 void ResetForTesting(bool allow_leaks);
576 void ResetBookkeepingForTesting();
577
578 PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
579 return flags.bucket_distribution;
580 }
581
582 static uint16_t SizeToBucketIndex(size_t size,
583 BucketDistribution bucket_distribution);
584
585 PA_ALWAYS_INLINE void FreeInSlotSpan(uintptr_t slot_start,
586 SlotSpan* slot_span)
587 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
588
589 // Frees memory, with |slot_start| as returned by |RawAlloc()|.
590 PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start);
591 PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start, SlotSpan* slot_span)
592 PA_LOCKS_EXCLUDED(lock_);
593
594 PA_ALWAYS_INLINE void RawFreeBatch(FreeListEntry* head,
595 FreeListEntry* tail,
596 size_t size,
597 SlotSpan* slot_span)
598 PA_LOCKS_EXCLUDED(lock_);
599
600 PA_ALWAYS_INLINE void RawFreeWithThreadCache(uintptr_t slot_start,
601 SlotSpan* slot_span);
602
603 // This is safe to do because we are switching to a bucket distribution with
604 // more buckets, meaning any allocations we have done before the switch are
605 // guaranteed to have a bucket under the new distribution when they are
606 // eventually deallocated. We do not need synchronization here.
607 void SwitchToDenserBucketDistribution() {
608 flags.bucket_distribution = BucketDistribution::kDenser;
609 }
610 // Switching back to the less dense bucket distribution is ok during tests.
611 // At worst, we end up with deallocations that are sent to a bucket that we
612 // cannot allocate from, which will not cause problems besides wasting
613 // memory.
614 void ResetBucketDistributionForTesting() {
615 flags.bucket_distribution = BucketDistribution::kDefault;
616 }
617
618 ThreadCache* thread_cache_for_testing() const {
619 return flags.with_thread_cache ? ThreadCache::Get() : nullptr;
620 }
621 size_t get_total_size_of_committed_pages() const {
622 return total_size_of_committed_pages.load(std::memory_order_relaxed);
623 }
624 size_t get_max_size_of_committed_pages() const {
625 return max_size_of_committed_pages.load(std::memory_order_relaxed);
626 }
627
628 size_t get_total_size_of_allocated_bytes() const {
629 // Since this is only used for bookkeeping, we don't care if the value is
630 // stale, so no need to get a lock here.
631 return PA_TS_UNCHECKED_READ(total_size_of_allocated_bytes);
632 }
633
634 size_t get_max_size_of_allocated_bytes() const {
635 // Since this is only used for bookkeeping, we don't care if the value is
636 // stale, so no need to get a lock here.
637 return PA_TS_UNCHECKED_READ(max_size_of_allocated_bytes);
638 }
639
640 internal::pool_handle ChoosePool() const {
641 #if BUILDFLAG(HAS_64_BIT_POINTERS)
642 if (flags.use_configurable_pool) {
643 PA_DCHECK(IsConfigurablePoolAvailable());
644 return internal::kConfigurablePoolHandle;
645 }
646 #endif
647 #if BUILDFLAG(ENABLE_PKEYS)
648 if (flags.pkey != internal::kDefaultPkey) {
649 return internal::kPkeyPoolHandle;
650 }
651 #endif
652 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
653 return brp_enabled() ? internal::kBRPPoolHandle
654 : internal::kRegularPoolHandle;
655 #else
656 return internal::kRegularPoolHandle;
657 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
658 }
659
660 PA_ALWAYS_INLINE bool IsQuarantineAllowed() const {
661 return flags.quarantine_mode != QuarantineMode::kAlwaysDisabled;
662 }
663
664 PA_ALWAYS_INLINE bool IsQuarantineEnabled() const {
665 return flags.quarantine_mode == QuarantineMode::kEnabled;
666 }
667
668 PA_ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
669 if (PA_UNLIKELY(flags.quarantine_mode != QuarantineMode::kEnabled)) {
670 return false;
671 }
672 #if PA_CONFIG(HAS_MEMORY_TAGGING)
673 if (PA_UNLIKELY(quarantine_always_for_testing)) {
674 return true;
675 }
676 // If quarantine is enabled and the tag overflows, move the containing slot
677 // to quarantine, to prevent the attacker from exploiting a pointer that has
678 // an old tag.
679 if (PA_LIKELY(IsMemoryTaggingEnabled())) {
680 return internal::HasOverflowTag(object);
681 }
682 // Default behaviour if MTE is not enabled for this PartitionRoot.
683 return true;
684 #else
685 return true;
686 #endif
687 }
688
689 PA_ALWAYS_INLINE void SetQuarantineAlwaysForTesting(bool value) {
690 quarantine_always_for_testing = value;
691 }
692
693 PA_ALWAYS_INLINE bool IsScanEnabled() const {
694 // Enabled scan implies enabled quarantine.
695 PA_DCHECK(flags.scan_mode != ScanMode::kEnabled || IsQuarantineEnabled());
696 return flags.scan_mode == ScanMode::kEnabled;
697 }
698
699 PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
700 GetDirectMapMetadataAndGuardPagesSize() {
701 // Because we need to fake a direct-map region to look like a super page, we
702 // need to allocate more pages around the payload:
703 // - The first partition page is a combination of metadata and guard region.
704 // - We also add a trailing guard page. In most cases, a system page would
705 // suffice. But on 32-bit systems when BRP is on, we need a partition page
706 // to match the granularity of the BRP pool bitmap. For consistency, we'll use
707 // a partition page everywhere, which is cheap as it's uncommitted address
708 // space anyway.
709 return 2 * internal::PartitionPageSize();
710 }
711
712 PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
713 GetDirectMapSlotSize(size_t raw_size) {
714 // Caller must check that the size is not above the MaxDirectMapped()
715 // limit before calling. This also guards against integer overflow in the
716 // calculation here.
717 PA_DCHECK(raw_size <= internal::MaxDirectMapped());
718 return partition_alloc::internal::base::bits::AlignUp(
719 raw_size, internal::SystemPageSize());
720 }
721
722 PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize(
723 size_t padded_raw_size) {
724 // Caller must check that the size is not above the MaxDirectMapped()
725 // limit before calling. This also guards against integer overflow in the
726 // calculation here.
727 PA_DCHECK(padded_raw_size <= internal::MaxDirectMapped());
728 return partition_alloc::internal::base::bits::AlignUp(
729 padded_raw_size + GetDirectMapMetadataAndGuardPagesSize(),
730 internal::DirectMapAllocationGranularity());
731 }
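// A worked example of the two helpers above (a sketch assuming 4 KiB system
// pages and 16 KiB partition pages): for raw_size = 1 MiB + 100 bytes,
// GetDirectMapSlotSize() rounds up to the next system page, i.e. 1 MiB + 4 KiB;
// GetDirectMapReservationSize() then adds the 2 partition pages of metadata
// and trailing guard (32 KiB) and rounds the total up to
// DirectMapAllocationGranularity().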
732
733 PA_ALWAYS_INLINE size_t AdjustSize0IfNeeded(size_t size) const {
734 // There are known cases where allowing size 0 would lead to problems:
735 // 1. If extras are present only before allocation (e.g. BRP ref-count), the
736 // extras will fill the entire kAlignment-sized slot, leading to
737 // returning a pointer to the next slot. ReallocWithFlags() calls
738 // SlotSpanMetadata::FromObject() prior to subtracting extras, thus
739 // potentially getting a wrong slot span.
740 // 2. If we put BRP ref-count in the previous slot, that slot may be free.
741 // In this case, the slot needs to fit both, a free-list entry and a
742 // ref-count. If sizeof(PartitionRefCount) is 8, it fills the entire
743 // smallest slot on 32-bit systems (kSmallestBucket is 8), thus not
744 // leaving space for the free-list entry.
745 // 3. On macOS and iOS, PartitionGetSizeEstimate() is used for two purposes:
746 // as a zone dispatcher and as an underlying implementation of
747 // malloc_size(3). As a zone dispatcher, zero has a special meaning of
748 // "doesn't belong to this zone". When extras fill out the entire slot,
749 // the usable size is 0, thus confusing the zone dispatcher.
750 //
751 // To save ourselves a branch on this hot path, we could eliminate this
752 // check at compile time for cases not listed above. The #if statement would
753 // be rather complex. Then there is also the fear of the unknown. The
754 // existing cases were discovered through obscure, painful-to-debug crashes.
755 // Better save ourselves trouble with not-yet-discovered cases.
756 if (PA_UNLIKELY(size == 0)) {
757 return 1;
758 }
759 return size;
760 }
761
762 // Adjusts the size by adding extras. Also includes the 0->1 adjustment if
763 // needed.
764 PA_ALWAYS_INLINE size_t AdjustSizeForExtrasAdd(size_t size) const {
765 size = AdjustSize0IfNeeded(size);
766 PA_DCHECK(size + flags.extras_size >= size);
767 return size + flags.extras_size;
768 }
769
770 // Adjusts the size by subtracting extras. Doesn't include the 0->1 adjustment,
771 // which leads to an asymmetry with AdjustSizeForExtrasAdd, but callers of
772 // AdjustSizeForExtrasSubtract either expect the adjustment to be included, or
773 // are indifferent.
774 PA_ALWAYS_INLINE size_t AdjustSizeForExtrasSubtract(size_t size) const {
775 return size - flags.extras_size;
776 }
777
778 PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
779 // TODO(bartekn): Check that |slot_start| is indeed a slot start.
780 return slot_start + flags.extras_offset;
781 }
782
783 PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
784 // TODO(bartekn): Check that |slot_start| is indeed a slot start.
785 return internal::TagAddr(SlotStartToObjectAddr(slot_start));
786 }
787
788 PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
789 return UntagPtr(object) - flags.extras_offset;
790 // TODO(bartekn): Check that the result is indeed a slot start.
791 }
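// A worked example of the extras helpers above (a sketch with hypothetical
// values extras_size = 16 and extras_offset = 8, e.g. a ref-count in front of
// and a cookie behind the object):
//   AdjustSizeForExtrasAdd(0)          == 1 + 16 == 17  // 0->1, then extras.
//   AdjustSizeForExtrasSubtract(48)    == 48 - 16 == 32
//   SlotStartToObjectAddr(slot_start)  == slot_start + 8
//   ObjectToSlotStart(object)          == UntagPtr(object) - 8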
792
793 bool brp_enabled() const {
794 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
795 return flags.brp_enabled_;
796 #else
797 return false;
798 #endif
799 }
800
801 bool brp_zapping_enabled() const {
802 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
803 return flags.brp_zapping_enabled_;
804 #else
805 return false;
806 #endif
807 }
808
809 PA_ALWAYS_INLINE bool uses_configurable_pool() const {
810 return flags.use_configurable_pool;
811 }
812
813 // To make tests deterministic, it is necessary to uncap the amount of memory
814 // waste incurred by empty slot spans. Otherwise, the size of various
815 // freelists and the amount of committed memory become harder to reason about
816 // (and brittle) with a single thread, and non-deterministic with several.
817 void UncapEmptySlotSpanMemoryForTesting() {
818 max_empty_slot_spans_dirty_bytes_shift = 0;
819 }
820
821 // Enables the sorting of active slot spans in PurgeMemory().
822 static void EnableSortActiveSlotSpans();
823
824 private:
825 static inline bool sort_active_slot_spans_ = false;
826
827 // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
828 // `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
829 // is enforced by a static_assert() in partition_root.cc, so this is
830 // fine. However, UBSAN is correctly pointing out that there is an
831 // out-of-bounds access, so disable it for these accesses.
832 //
833 // See crbug.com/1150772 for an instance of Clusterfuzz / UBSAN detecting
834 // this.
835 PA_ALWAYS_INLINE const Bucket& PA_NO_SANITIZE("undefined")
836 bucket_at(size_t i) const {
837 PA_DCHECK(i <= internal::kNumBuckets);
838 return buckets[i];
839 }
840
841 // Returns whether a |bucket| from |this| root is direct-mapped. This function
842 // does not touch |bucket|, contrary to PartitionBucket::is_direct_mapped().
843 //
844 // This is meant to be used in hot paths, and particularly *before* going into
845 // the thread cache fast path. Indeed, real-world profiles show that accessing
846 // an allocation's bucket is responsible for a sizable fraction of *total*
847 // deallocation time. This can be understood because
848 // - All deallocations have to access the bucket to know whether it is
849 // direct-mapped. If not (vast majority of allocations), it can go through
850 // the fast path, i.e. thread cache.
851 // - The bucket is relatively frequently written to, by *all* threads
852 // (e.g. every time a slot span becomes full or empty), so accessing it will
853 // result in some amount of cacheline ping-pong.
854 PA_ALWAYS_INLINE bool IsDirectMappedBucket(Bucket* bucket) const {
855 // All regular allocations are associated with a bucket in the |buckets_|
856 // array. A range check is then sufficient to identify direct-mapped
857 // allocations.
858 bool ret = !(bucket >= this->buckets && bucket <= &this->sentinel_bucket);
859 PA_DCHECK(ret == bucket->is_direct_mapped());
860 return ret;
861 }
862
863 // Allocates a memory slot, without initializing extras.
864 //
865 // - |flags| are as in AllocWithFlags().
866 // - |raw_size| accounts for extras on top of AllocWithFlags()'s
867 // |requested_size|.
868 // - |usable_size| and |is_already_zeroed| are output only. |usable_size| is
869 // guaranteed to be greater than or equal to AllocWithFlags()'s |requested_size|.
870 PA_ALWAYS_INLINE uintptr_t RawAlloc(Bucket* bucket,
871 unsigned int flags,
872 size_t raw_size,
873 size_t slot_span_alignment,
874 size_t* usable_size,
875 bool* is_already_zeroed);
876 PA_ALWAYS_INLINE uintptr_t AllocFromBucket(Bucket* bucket,
877 unsigned int flags,
878 size_t raw_size,
879 size_t slot_span_alignment,
880 size_t* usable_size,
881 bool* is_already_zeroed)
882 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
883
884 bool TryReallocInPlaceForNormalBuckets(void* object,
885 SlotSpan* slot_span,
886 size_t new_size);
887 bool TryReallocInPlaceForDirectMap(
888 internal::SlotSpanMetadata<thread_safe>* slot_span,
889 size_t requested_size) PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
890 void DecommitEmptySlotSpans() PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
891 PA_ALWAYS_INLINE void RawFreeLocked(uintptr_t slot_start)
892 PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
893 ThreadCache* MaybeInitThreadCache();
894
895 // May return an invalid thread cache.
896 PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
897 PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
898
899 #if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
900 static internal::Lock& GetEnumeratorLock();
901
902 PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) next_root = nullptr;
903 PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) prev_root = nullptr;
904
905 friend class internal::PartitionRootEnumerator;
906 #endif // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
907
908 friend class ThreadCache;
909 };
910
911 namespace internal {
912
913 class ScopedSyscallTimer {
914 public:
915 #if PA_CONFIG(COUNT_SYSCALL_TIME)
916 explicit ScopedSyscallTimer(PartitionRoot<>* root)
917 : root_(root), tick_(base::TimeTicks::Now()) {}
918
919 ~ScopedSyscallTimer() {
920 root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
921
922 int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
923 if (elapsed_nanos > 0) {
924 root_->syscall_total_time_ns.fetch_add(
925 static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
926 }
927 }
928
929 private:
930 PartitionRoot<>* root_;
931 const base::TimeTicks tick_;
932 #else
933 explicit ScopedSyscallTimer(PartitionRoot<>* root) {
934 root->syscall_count.fetch_add(1, std::memory_order_relaxed);
935 }
936 #endif
937 };
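// Usage sketch: instantiate a timer on the stack around a system call made on
// behalf of |root|; when PA_CONFIG(COUNT_SYSCALL_TIME) is set, the destructor
// adds the elapsed time to |syscall_total_time_ns| (the call being timed is
// purely illustrative):
//
//   {
//     ScopedSyscallTimer timer(root);
//     // ... perform the decommit/recommit system call here ...
//   }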
938
939 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
940
941 PA_ALWAYS_INLINE uintptr_t
942 PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
943 PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
944 #if BUILDFLAG(HAS_64_BIT_POINTERS)
945 // Use this variant of GetDirectMapReservationStart as it has better
946 // performance.
947 uintptr_t offset = OffsetInBRPPool(address);
948 uintptr_t reservation_start =
949 GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
950 #else // BUILDFLAG(HAS_64_BIT_POINTERS)
951 uintptr_t reservation_start = GetDirectMapReservationStart(address);
952 #endif
953 if (!reservation_start) {
954 return 0;
955 }
956
957 // The direct map allocation may not start exactly from the first page, as
958 // there may be padding for alignment. The first page metadata holds an offset
959 // to where direct map metadata, and thus direct map start, are located.
960 auto* first_page = PartitionPage<ThreadSafe>::FromAddr(reservation_start +
961 PartitionPageSize());
962 auto* page = first_page + first_page->slot_span_metadata_offset;
963 PA_DCHECK(page->is_valid);
964 PA_DCHECK(!page->slot_span_metadata_offset);
965 auto* slot_span = &page->slot_span_metadata;
966 uintptr_t slot_start =
967 SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
968 #if BUILDFLAG(PA_DCHECK_IS_ON)
969 auto* metadata =
970 PartitionDirectMapMetadata<ThreadSafe>::FromSlotSpan(slot_span);
971 size_t padding_for_alignment =
972 metadata->direct_map_extent.padding_for_alignment;
973 PA_DCHECK(padding_for_alignment ==
974 static_cast<size_t>(page - first_page) * PartitionPageSize());
975 PA_DCHECK(slot_start ==
976 reservation_start + PartitionPageSize() + padding_for_alignment);
977 #endif // BUILDFLAG(PA_DCHECK_IS_ON)
978 return slot_start;
979 }
980
981 // Gets the address to the beginning of the allocated slot. The input |address|
982 // can point anywhere in the slot, including the slot start as well as
983 // immediately past the slot.
984 //
985 // This isn't a general purpose function; it is used specifically for obtaining
986 // BackupRefPtr's ref-count. The caller is responsible for ensuring that the
987 // ref-count is in place for this allocation.
988 PA_ALWAYS_INLINE uintptr_t
989 PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
990 // Adjust to support pointers right past the end of an allocation, which in
991 // some cases appear to point outside the designated allocation slot.
992 //
993 // If ref-count is present before the allocation, then adjusting a valid
994 // pointer down will not cause us to go down to the previous slot, otherwise
995 // no adjustment is needed (and likely wouldn't be correct as there is
996 // a risk of going down to the previous slot). Either way,
997 // kPartitionPastAllocationAdjustment takes care of that detail.
998 address -= kPartitionPastAllocationAdjustment;
999 PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(address));
1000 DCheckIfManagedByPartitionAllocBRPPool(address);
1001
1002 uintptr_t directmap_slot_start =
1003 PartitionAllocGetDirectMapSlotStartInBRPPool(address);
1004 if (PA_UNLIKELY(directmap_slot_start)) {
1005 return directmap_slot_start;
1006 }
1007 auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromAddr(address);
1008 auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
1009 // Double check that ref-count is indeed present.
1010 PA_DCHECK(root->brp_enabled());
1011
1012 // Get the offset from the beginning of the slot span.
1013 uintptr_t slot_span_start =
1014 SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
1015 size_t offset_in_slot_span = address - slot_span_start;
1016
1017 auto* bucket = slot_span->bucket;
1018 return slot_span_start +
1019 bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
1020 }
1021
1022 // Return values to indicate where a pointer is pointing relative to the bounds
1023 // of an allocation.
1024 enum class PtrPosWithinAlloc {
1025 // When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
1026 // are also considered in-bounds.
1027 kInBounds,
1028 #if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
1029 kAllocEnd,
1030 #endif
1031 kFarOOB
1032 };
1033
1034 // Checks whether `test_address` is in the same allocation slot as
1035 // `orig_address`.
1036 //
1037 // This can be called after adding or subtracting from the `orig_address`
1038 // to produce a different pointer which must still stay in the same allocation.
1039 //
1040 // The `type_size` is the size of the type that the raw_ptr is pointing to,
1041 // which may be the type the allocation is holding or a compatible pointer type
1042 // such as a base class or char*. It is used to detect pointers near the end of
1043 // the allocation but not strictly beyond it.
1044 //
1045 // This isn't a general purpose function. The caller is responsible for ensuring
1046 // that the ref-count is in place for this allocation.
1047 PA_COMPONENT_EXPORT(PARTITION_ALLOC)
1048 PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
1049 uintptr_t test_address,
1050 size_t type_size);
1051
1052 PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
1053 PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
1054
1055 auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
1056 auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
1057 // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
1058 // supports reference counts.
1059 PA_DCHECK(root->brp_enabled());
1060
1061 // Iterating over the entire slot can be really expensive.
1062 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1063 auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
1064 // If we have a hook the object segment is not necessarily filled
1065 // with |kQuarantinedByte|.
1066 if (PA_LIKELY(!hook)) {
1067 unsigned char* object =
1068 static_cast<unsigned char*>(root->SlotStartToObject(slot_start));
1069 for (size_t i = 0; i < slot_span->GetUsableSize(root); ++i) {
1070 PA_DCHECK(object[i] == kQuarantinedByte);
1071 }
1072 }
1073 DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
1074 slot_span->GetUtilizedSlotSize()
1075 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
1076 - sizeof(PartitionRefCount)
1077 #endif
1078 );
1079 #endif
1080
1081 root->total_size_of_brp_quarantined_bytes.fetch_sub(
1082 slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
1083 root->total_count_of_brp_quarantined_slots.fetch_sub(
1084 1, std::memory_order_relaxed);
1085
1086 root->RawFreeWithThreadCache(slot_start, slot_span);
1087 }
1088 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1089
1090 } // namespace internal
1091
1092 template <bool thread_safe>
1093 PA_ALWAYS_INLINE uintptr_t
1094 PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
1095 unsigned int flags,
1096 size_t raw_size,
1097 size_t slot_span_alignment,
1098 size_t* usable_size,
1099 bool* is_already_zeroed) {
1100 PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
1101 internal::base::bits::IsPowerOfTwo(slot_span_alignment));
1102 SlotSpan* slot_span = bucket->active_slot_spans_head;
1103 // There always must be a slot span on the active list (could be a sentinel).
1104 PA_DCHECK(slot_span);
1105 // Check that it isn't marked full, which could only be true if the span was
1106 // removed from the active list.
1107 PA_DCHECK(!slot_span->marked_full);
1108
1109 uintptr_t slot_start =
1110 internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
1111 // Use the fast path when a slot is readily available on the free list of the
1112 // first active slot span. However, fall back to the slow path if a
1113 // higher-order alignment is requested, because an inner slot of an existing
1114 // slot span is unlikely to satisfy it.
1115 if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
1116 slot_start)) {
1117 *is_already_zeroed = false;
1118 // This is a fast path, avoid calling GetUsableSize() in Release builds
1119 // as it is costlier. Copy its small bucket path instead.
1120 *usable_size = AdjustSizeForExtrasSubtract(bucket->slot_size);
1121 PA_DCHECK(*usable_size == slot_span->GetUsableSize(this));
1122
1123 // If these DCHECKs fire, you probably corrupted memory.
1124 // TODO(crbug.com/1257655): See if we can afford to make these CHECKs.
1125 PA_DCHECK(IsValidSlotSpan(slot_span));
1126
1127 // All large allocations must go through the slow path to correctly update
1128 // the size metadata.
1129 PA_DCHECK(!slot_span->CanStoreRawSize());
1130 PA_DCHECK(!slot_span->bucket->is_direct_mapped());
1131 void* entry = slot_span->PopForAlloc(bucket->slot_size);
1132 PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);
1133
1134 PA_DCHECK(slot_span->bucket == bucket);
1135 } else {
1136 slot_start = bucket->SlowPathAlloc(this, flags, raw_size,
1137 slot_span_alignment, is_already_zeroed);
1138 if (PA_UNLIKELY(!slot_start)) {
1139 return 0;
1140 }
1141
1142 slot_span = SlotSpan::FromSlotStart(slot_start);
1143 // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
1144 PA_DCHECK(IsValidSlotSpan(slot_span));
1145 // For direct mapped allocations, |bucket| is the sentinel.
1146 PA_DCHECK((slot_span->bucket == bucket) ||
1147 (slot_span->bucket->is_direct_mapped() &&
1148 (bucket == &sentinel_bucket)));
1149
1150 *usable_size = slot_span->GetUsableSize(this);
1151 }
1152 PA_DCHECK(slot_span->GetUtilizedSlotSize() <= slot_span->bucket->slot_size);
1153 IncreaseTotalSizeOfAllocatedBytes(
1154 slot_start, slot_span->GetSlotSizeForBookkeeping(), raw_size);
1155
1156 #if BUILDFLAG(USE_FREESLOT_BITMAP)
1157 if (!slot_span->bucket->is_direct_mapped()) {
1158 internal::FreeSlotBitmapMarkSlotAsUsed(slot_start);
1159 }
1160 #endif
1161
1162 return slot_start;
1163 }
1164
1165 // static
1166 template <bool thread_safe>
1167 PA_NOINLINE void PartitionRoot<thread_safe>::Free(void* object) {
1168 return FreeWithFlags(0, object);
1169 }
1170
1171 // static
1172 template <bool thread_safe>
1173 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
1174 unsigned int flags,
1175 void* object) {
1176 PA_DCHECK(flags < FreeFlags::kLastFlag << 1);
1177
1178 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1179 if (!(flags & FreeFlags::kNoMemoryToolOverride)) {
1180 free(object);
1181 return;
1182 }
1183 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1184 if (PA_UNLIKELY(!object)) {
1185 return;
1186 }
1187
1188 if (PartitionAllocHooks::AreHooksEnabled()) {
1189 PartitionAllocHooks::FreeObserverHookIfEnabled(object);
1190 if (PartitionAllocHooks::FreeOverrideHookIfEnabled(object)) {
1191 return;
1192 }
1193 }
1194
1195 FreeNoHooks(object);
1196 }
1197
1198 // Returns whether MTE is supported for this partition root. Because MTE stores
1199 // tagging information in the high bits of the pointer, it causes issues with
1200 // components like V8's ArrayBuffers which use custom pointer representations.
1201 // All custom representations encountered so far rely on an "is in configurable
1202 // pool?" check, so we use that as a proxy.
1203 template <bool thread_safe>
1204 PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
1205 const {
1206 #if PA_CONFIG(HAS_MEMORY_TAGGING)
1207 return !flags.use_configurable_pool;
1208 #else
1209 return false;
1210 #endif
1211 }
1212
1213 // static
1214 template <bool thread_safe>
1215 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
1216 if (PA_UNLIKELY(!object)) {
1217 return;
1218 }
1219 // Almost all calls to FreeNoHooks() will end up writing to |*object|; the
1220 // only case where we don't is delayed free() in PCScan. In any case, |*object|
1221 // can be cold in cache.
1222 PA_PREFETCH(object);
1223 uintptr_t object_addr = internal::ObjectPtr2Addr(object);
1224
1225 // On Android, malloc() interception is more fragile than on other
1226 // platforms, as we use wrapped symbols. However, the pools allow us to
1227 // quickly tell that a pointer was allocated with PartitionAlloc.
1228 //
1229 // This is a crash to detect imperfect symbol interception. However, we can
1230 // forward allocations we don't own to the system malloc() implementation in
1231 // these rare cases, assuming that some remain.
1232 //
1233 // On Android Chromecast devices, this is already checked in PartitionFree()
1234 // in the shim.
1235 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
1236 (BUILDFLAG(IS_ANDROID) && !BUILDFLAG(PA_IS_CAST_ANDROID))
1237 PA_CHECK(IsManagedByPartitionAlloc(object_addr));
1238 #endif
1239
1240 // Fetch the root from the address, and not SlotSpanMetadata. This is
1241 // important, as obtaining it from SlotSpanMetadata is a slow operation
1242 // (looking into the metadata area, and following a pointer), which can induce
1243 // cache coherency traffic (since they're read on every free(), and written to
1244 // on any malloc()/free() that is not a hit in the thread cache). This way we
1245 // change the critical path from object -> slot_span -> root into two
1246 // *parallel* ones:
1247 // 1. object -> root
1248 // 2. object -> slot_span
1249 auto* root = FromAddrInFirstSuperpage(object_addr);
1250 SlotSpan* slot_span = SlotSpan::FromObject(object);
1251 PA_DCHECK(FromSlotSpan(slot_span) == root);
1252
1253 uintptr_t slot_start = root->ObjectToSlotStart(object);
1254 PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
1255
1256 #if PA_CONFIG(HAS_MEMORY_TAGGING)
1257 if (PA_LIKELY(root->IsMemoryTaggingEnabled())) {
1258 const size_t slot_size = slot_span->bucket->slot_size;
1259 if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
1260 // slot_span is untagged at this point, so we have to recover its tag
1261 // again to increment and provide use-after-free mitigations.
1262 internal::TagMemoryRangeIncrement(internal::TagAddr(slot_start),
1263 slot_size);
1264 // Incrementing the MTE-tag in the memory range invalidates the |object|'s
1265 // tag, so it must be retagged.
1266 object = internal::TagPtr(object);
1267 }
1268 }
1269 #else
1270 // We are going to read from |*slot_span| in all branches, but haven't done it
1271 // yet.
1272 //
1273 // TODO(crbug.com/1207307): It would be much better to avoid touching
1274 // |*slot_span| at all on the fast path, or at least to separate its read-only
1275 // parts (i.e. bucket pointer) from the rest. Indeed, every thread cache miss
1276 // (or batch fill) will *write* to |slot_span->freelist_head|, leading to
1277 // cacheline ping-pong.
1278 //
1279 // Don't do it when memory tagging is enabled, as |*slot_span| has already
1280 // been touched above.
1281 PA_PREFETCH(slot_span);
1282 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
1283
1284 #if BUILDFLAG(USE_STARSCAN)
1285 // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
1286 // default.
1287 if (PA_UNLIKELY(root->ShouldQuarantine(object))) {
1288 // PCScan safepoint. Call before potentially scheduling scanning task.
1289 PCScan::JoinScanIfNeeded();
1290 if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
1291 PCScan::MoveToQuarantine(object, slot_span->GetUsableSize(root),
1292 slot_start, slot_span->bucket->slot_size);
1293 return;
1294 }
1295 }
1296 #endif // BUILDFLAG(USE_STARSCAN)
1297
1298 root->FreeNoHooksImmediate(object, slot_span, slot_start);
1299 }
1300
1301 template <bool thread_safe>
1302 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
1303 void* object,
1304 SlotSpan* slot_span,
1305 uintptr_t slot_start) {
1306 // The thread cache is added "in the middle" of the main allocator, that is:
1307 // - After all the cookie/ref-count management
1308 // - Before the "raw" allocator.
1309 //
1310 // On the deallocation side:
1311 // 1. Check cookie/ref-count, adjust the pointer
1312 // 2. Deallocation
1313 // a. Return to the thread cache if possible. If it succeeds, return.
1314 // b. Otherwise, call the "raw" allocator <-- Locking
1315 PA_DCHECK(object);
1316 PA_DCHECK(slot_span);
1317 PA_DCHECK(IsValidSlotSpan(slot_span));
1318 PA_DCHECK(slot_start);
1319
1320 // Layout inside the slot:
1321 // |[refcnt]|...object...|[empty]|[cookie]|[unused]|
1322 // <--------(a)--------->
1323 // <--(b)---> + <--(b)--->
1324 // <-----------------(c)------------------>
1325 // (a) usable_size
1326 // (b) extras
1327 // (c) utilized_slot_size
1328 //
1329 // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
1330 // |...object...|[empty]|[cookie]|[unused]|[refcnt]|
1331 // <--------(a)--------->
1332 // <--(b)---> + <--(b)--->
1333 // <-------------(c)-------------> + <--(c)--->
1334 //
1335 // Note: ref-count and cookie can be 0-sized.
1336 //
1337 // For more context, see the other "Layout inside the slot" comment inside
1338 // AllocWithFlagsNoHooks().
1339
1340 #if BUILDFLAG(PA_DCHECK_IS_ON)
1341 if (flags.allow_cookie) {
1342 // Verify the cookie after the allocated region.
1343 // If this assert fires, you probably corrupted memory.
1344 internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
1345 slot_span->GetUsableSize(this));
1346 }
1347 #endif
1348
1349 #if BUILDFLAG(USE_STARSCAN)
1350 // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
1351 // default.
1352 if (PA_UNLIKELY(IsQuarantineEnabled())) {
1353 if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
1354 // Mark the state in the state bitmap as freed.
1355 internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
1356 }
1357 }
1358 #endif // BUILDFLAG(USE_STARSCAN)
1359
1360 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1361 // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
1362 // be false only for the aligned partition.
1363 if (brp_enabled()) {
1364 auto* ref_count = internal::PartitionRefCountPointer(slot_start);
1365 // If there are no more references to the allocation, it can be freed
1366 // immediately. Otherwise, defer the operation and zap the memory to turn
1367 // potential use-after-free issues into unexploitable crashes.
1368 if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
1369 brp_zapping_enabled())) {
1370 auto usable_size = slot_span->GetUsableSize(this);
1371 auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
1372 if (PA_UNLIKELY(hook)) {
1373 hook(object, usable_size);
1374 } else {
1375 internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
1376 }
1377 }
1378
1379 if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
1380 total_size_of_brp_quarantined_bytes.fetch_add(
1381 slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
1382 total_count_of_brp_quarantined_slots.fetch_add(1,
1383 std::memory_order_relaxed);
1384 cumulative_size_of_brp_quarantined_bytes.fetch_add(
1385 slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
1386 cumulative_count_of_brp_quarantined_slots.fetch_add(
1387 1, std::memory_order_relaxed);
1388 return;
1389 }
1390 }
1391 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
1392
1393 // memset() can be really expensive.
1394 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
1395 internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
1396 internal::kFreedByte,
1397 slot_span->GetUtilizedSlotSize()
1398 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
1399 - sizeof(internal::PartitionRefCount)
1400 #endif
1401 );
1402 #elif PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
1403 // `memset` only once in a while: we're trading off safety for time
1404 // efficiency.
1405 if (PA_UNLIKELY(internal::RandomPeriod()) &&
1406 !IsDirectMappedBucket(slot_span->bucket)) {
1407 internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
1408 slot_span->GetUtilizedSlotSize()
1409 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
1410 - sizeof(internal::PartitionRefCount)
1411 #endif
1412 );
1413 }
1414 #endif // PA_CONFIG(ZERO_RANDOMLY_ON_FREE)
1415
1416 RawFreeWithThreadCache(slot_start, slot_span);
1417 }
1418
1419 template <bool thread_safe>
1420 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeInSlotSpan(
1421 uintptr_t slot_start,
1422 SlotSpan* slot_span) {
1423 DecreaseTotalSizeOfAllocatedBytes(slot_start,
1424 slot_span->GetSlotSizeForBookkeeping());
1425
1426 #if BUILDFLAG(USE_FREESLOT_BITMAP)
1427 if (!slot_span->bucket->is_direct_mapped()) {
1428 internal::FreeSlotBitmapMarkSlotAsFree(slot_start);
1429 }
1430 #endif
1431
1432 return slot_span->Free(slot_start);
1433 }
1434
1435 template <bool thread_safe>
1436 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(
1437 uintptr_t slot_start) {
1438 SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
1439 RawFree(slot_start, slot_span);
1440 }
1441
1442 #if PA_CONFIG(IS_NONCLANG_MSVC)
1443 // MSVC only supports inline assembly on x86. This preprocessor directive
1444 // is intended to serve as a replacement for it.
1445 //
1446 // TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
1447 // a no-op or similar. The documentation doesn't say.
1448 #pragma optimize("", off)
1449 #endif
1450 template <bool thread_safe>
1451 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(uintptr_t slot_start,
1452 SlotSpan* slot_span) {
1453 // At this point we are about to acquire the lock, so we try to minimize the
1454 // risk of blocking inside the locked section.
1455 //
1456 // For allocations that are not direct-mapped, there will always be a store at
1457 // the beginning of |*slot_start|, to link the freelist. This is why there is
1458 // a prefetch of it at the beginning of the free() path.
1459 //
1460 // However, the memory which is being freed can be very cold (for instance
1461 // during browser shutdown, when various caches are finally completely freed),
1462 // and so moved to either compressed memory or swap. This means that touching
1463 // it here can cause a major page fault. This in turn will cause
1464 // descheduling of the thread *while locked*. Since we don't have priority
1465 // inheritance locks on most platforms, avoiding long locked periods relies on
1466 // the OS having proper priority boosting. There is evidence
1467 // (crbug.com/1228523) that this is not always the case on Windows, and a very
1468 // low priority background thread can block the main one for a long time,
1469 // leading to hangs.
1470 //
1471 // To mitigate that, make sure that we fault *before* locking. Note that this
1472 // is useless for direct-mapped allocations (which are very rare anyway), and
1473 // that this path is *not* taken for thread cache bucket purge (since it calls
1474 // RawFreeLocked()). This is intentional, as the thread cache is purged often,
1475 // and as a consequence the memory has already been touched
1476 // recently (to link the thread cache freelist).
1477 *static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
1478 0;
1479 // Note: even though we write to slot_start + sizeof(void*) as well, due to
1480 // alignment constraints, the two locations are always going to be in the same
1481 // OS page. No need to write to the second one as well.
1482 //
1483 // Do not move the store above inside the locked section.
1484 #if !(PA_CONFIG(IS_NONCLANG_MSVC))
1485 __asm__ __volatile__("" : : "r"(slot_start) : "memory");
1486 #endif
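// Note (descriptive, added for clarity): the empty inline assembly above is a
// compiler-only barrier. Passing |slot_start| as an input and declaring a
// "memory" clobber keeps the compiler from eliding the store to |*slot_start|
// or sinking it past the lock acquisition below; no CPU instruction or
// hardware fence is emitted for it.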
1487
1488 ::partition_alloc::internal::ScopedGuard guard{lock_};
1489 FreeInSlotSpan(slot_start, slot_span);
1490 }
1491 #if PA_CONFIG(IS_NONCLANG_MSVC)
1492 #pragma optimize("", on)
1493 #endif
1494
1495 template <bool thread_safe>
1496 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeBatch(
1497 FreeListEntry* head,
1498 FreeListEntry* tail,
1499 size_t size,
1500 SlotSpan* slot_span) {
1501 PA_DCHECK(head);
1502 PA_DCHECK(tail);
1503 PA_DCHECK(size > 0);
1504 PA_DCHECK(slot_span);
1505 PA_DCHECK(IsValidSlotSpan(slot_span));
1506 // The passed freelist is likely to be just built up, which means that the
1507 // corresponding pages were faulted in (without acquiring the lock). So there
1508 // is no need to touch pages manually here before the lock.
1509 ::partition_alloc::internal::ScopedGuard guard{lock_};
1510 // TODO(thiabaud): Fix the accounting here. The size is correct, but the
1511 // pointer is not. This only affects local tools that record each allocation,
1512 // not our metrics.
1513 DecreaseTotalSizeOfAllocatedBytes(
1514 0u, slot_span->GetSlotSizeForBookkeeping() * size);
1515 slot_span->AppendFreeList(head, tail, size);
1516 }
1517
1518 template <bool thread_safe>
1519 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
1520 uintptr_t slot_start,
1521 SlotSpan* slot_span) {
1522 // PA_LIKELY: performance-sensitive partitions have a thread cache,
1523 // direct-mapped allocations are uncommon.
1524 ThreadCache* thread_cache = GetThreadCache();
1525 if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
1526 !IsDirectMappedBucket(slot_span->bucket))) {
1527 size_t bucket_index =
1528 static_cast<size_t>(slot_span->bucket - this->buckets);
1529 size_t slot_size;
1530 if (PA_LIKELY(thread_cache->MaybePutInCache(slot_start, bucket_index,
1531 &slot_size))) {
1532 // This is a fast path, avoid calling GetUsableSize() in Release builds
1533 // as it is costlier. Copy its small bucket path instead.
1534 PA_DCHECK(!slot_span->CanStoreRawSize());
1535 size_t usable_size = AdjustSizeForExtrasSubtract(slot_size);
1536 PA_DCHECK(usable_size == slot_span->GetUsableSize(this));
1537 thread_cache->RecordDeallocation(usable_size);
1538 return;
1539 }
1540 }
1541
1542 if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
1543 // Accounting must be done outside `RawFree()`, as it's also called from the
1544 // thread cache. We would double-count otherwise.
1545 //
1546 // GetUsableSize() will always give the correct result, and we are in a slow
1547 // path here (since the thread cache case returned earlier).
1548 size_t usable_size = slot_span->GetUsableSize(this);
1549 thread_cache->RecordDeallocation(usable_size);
1550 }
1551 RawFree(slot_start, slot_span);
1552 }
1553
1554 template <bool thread_safe>
1555 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeLocked(
1556 uintptr_t slot_start) {
1557 SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
1558 // Direct-mapped deallocation releases then re-acquires the lock. The caller
1559 // may not expect that, but we never call this function on direct-mapped
1560 // allocations.
1561 PA_DCHECK(!IsDirectMappedBucket(slot_span->bucket));
1562 FreeInSlotSpan(slot_start, slot_span);
1563 }
1564
1565 // static
1566 template <bool thread_safe>
1567 PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
1568 SlotSpan* slot_span) {
1569 PartitionRoot* root = FromSlotSpan(slot_span);
1570 return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
1571 }
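// Illustrative sketch (not the actual initialization code): |inverted_self| is
// expected to hold the bitwise complement of the root's own address, set when
// the root is initialized, e.g.
//
//   root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
//
// A wild or corrupted |slot_span| pointer is very unlikely to land on memory
// satisfying this relationship, so the check above cheaply rejects it.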
1572
1573 template <bool thread_safe>
1574 PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
1575 PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
1576 auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
1577 reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
1578 return extent_entry->root;
1579 }
1580
1581 template <bool thread_safe>
1582 PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
1583 PartitionRoot<thread_safe>::FromFirstSuperPage(uintptr_t super_page) {
1584 PA_DCHECK(internal::IsReservationStart(super_page));
1585 auto* extent_entry =
1586 internal::PartitionSuperPageToExtent<thread_safe>(super_page);
1587 PartitionRoot* root = extent_entry->root;
1588 PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
1589 return root;
1590 }
1591
1592 template <bool thread_safe>
1593 PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
1594 PartitionRoot<thread_safe>::FromAddrInFirstSuperpage(uintptr_t address) {
1595 uintptr_t super_page = address & internal::kSuperPageBaseMask;
1596 PA_DCHECK(internal::IsReservationStart(super_page));
1597 return FromFirstSuperPage(super_page);
1598 }
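// Example (illustrative, assuming the usual 2 MiB super page size): the mask
// above clears the low 21 bits of |address|, i.e.
//
//   super_page = address & ~uintptr_t{(uintptr_t{1} << 21) - 1};
//
// which yields the base address of the super page containing |address|.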
1599
1600 template <bool thread_safe>
1601 PA_ALWAYS_INLINE void
1602 PartitionRoot<thread_safe>::IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
1603 size_t len,
1604 size_t raw_size) {
1605 total_size_of_allocated_bytes += len;
1606 max_size_of_allocated_bytes =
1607 std::max(max_size_of_allocated_bytes, total_size_of_allocated_bytes);
1608 #if BUILDFLAG(RECORD_ALLOC_INFO)
1609 partition_alloc::internal::RecordAllocOrFree(addr | 0x01, raw_size);
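  // Note (descriptive, added for clarity): the low bit of the recorded address
  // appears to act as an allocation marker (0x01 here vs. 0x00 in
  // DecreaseTotalSizeOfAllocatedBytes() below) for the local recording tool.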
1610 #endif // BUILDFLAG(RECORD_ALLOC_INFO)
1611 }
1612
1613 template <bool thread_safe>
1614 PA_ALWAYS_INLINE void
1615 PartitionRoot<thread_safe>::DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
1616 size_t len) {
1617 // An underflow here means we've miscounted |total_size_of_allocated_bytes|
1618 // somewhere.
1619 PA_DCHECK(total_size_of_allocated_bytes >= len);
1620 total_size_of_allocated_bytes -= len;
1621 #if BUILDFLAG(RECORD_ALLOC_INFO)
1622 partition_alloc::internal::RecordAllocOrFree(addr | 0x00, len);
1623 #endif // BUILDFLAG(RECORD_ALLOC_INFO)
1624 }
1625
1626 template <bool thread_safe>
1627 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
1628 size_t len) {
1629 const auto old_total =
1630 total_size_of_committed_pages.fetch_add(len, std::memory_order_relaxed);
1631
1632 const auto new_total = old_total + len;
1633
1634 // This function is called quite frequently; to avoid performance problems, we
1635 // don't want to hold a lock here, so we use compare and exchange instead.
1636 size_t expected = max_size_of_committed_pages.load(std::memory_order_relaxed);
1637 size_t desired;
1638 do {
1639 desired = std::max(expected, new_total);
1640 } while (!max_size_of_committed_pages.compare_exchange_weak(
1641 expected, desired, std::memory_order_relaxed, std::memory_order_relaxed));
1642 }
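// The loop above is a lock-free "running maximum" update. A minimal,
// self-contained sketch of the same pattern (illustrative only, not part of
// the allocator):
//
//   std::atomic<size_t> maximum{0};
//   void UpdateMax(size_t candidate) {
//     size_t expected = maximum.load(std::memory_order_relaxed);
//     while (expected < candidate &&
//            !maximum.compare_exchange_weak(expected, candidate,
//                                           std::memory_order_relaxed)) {
//       // compare_exchange_weak() reloads |expected| on failure, so the loop
//       // ends once the stored value is >= |candidate| or the swap succeeds.
//     }
//   }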
1643
1644 template <bool thread_safe>
1645 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
1646 size_t len) {
1647 total_size_of_committed_pages.fetch_sub(len, std::memory_order_relaxed);
1648 }
1649
1650 template <bool thread_safe>
1651 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::DecommitSystemPagesForData(
1652 uintptr_t address,
1653 size_t length,
1654 PageAccessibilityDisposition accessibility_disposition) {
1655 internal::ScopedSyscallTimer timer{this};
1656 DecommitSystemPages(address, length, accessibility_disposition);
1657 DecreaseCommittedPages(length);
1658 }
1659
1660 // Not unified with TryRecommitSystemPagesForData() to preserve error codes.
1661 template <bool thread_safe>
1662 PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPagesForData(
1663 uintptr_t address,
1664 size_t length,
1665 PageAccessibilityDisposition accessibility_disposition) {
1666 internal::ScopedSyscallTimer timer{this};
1667
1668 bool ok = TryRecommitSystemPages(address, length, GetPageAccessibility(),
1669 accessibility_disposition);
1670 if (PA_UNLIKELY(!ok)) {
1671 // Decommit some memory and retry. The alternative is crashing.
1672 DecommitEmptySlotSpans();
1673 RecommitSystemPages(address, length, GetPageAccessibility(),
1674 accessibility_disposition);
1675 }
1676
1677 IncreaseCommittedPages(length);
1678 }
1679
1680 template <bool thread_safe>
1681 PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
1682 uintptr_t address,
1683 size_t length,
1684 PageAccessibilityDisposition accessibility_disposition) {
1685 internal::ScopedSyscallTimer timer{this};
1686 bool ok = TryRecommitSystemPages(address, length, GetPageAccessibility(),
1687 accessibility_disposition);
1688 if (PA_UNLIKELY(!ok)) {
1689 // Decommit some memory and retry. The alternative is crashing.
1690 {
1691 ::partition_alloc::internal::ScopedGuard guard(lock_);
1692 DecommitEmptySlotSpans();
1693 }
1694 ok = TryRecommitSystemPages(address, length, GetPageAccessibility(),
1695 accessibility_disposition);
1696 }
1697
1698 if (ok) {
1699 IncreaseCommittedPages(length);
1700 }
1701
1702 return ok;
1703 }
1704
1705 // static
1706 //
1707 // Returns the size available to the app. It can be equal to or higher than the
1708 // requested size. If higher, the overage won't exceed what's actually usable
1709 // by the app without a risk of running out of an allocated region or into
1710 // PartitionAlloc's internal data. Used as malloc_usable_size and malloc_size.
1711 //
1712 // |ptr| should preferably point to the beginning of an object returned from
1713 // malloc() et al., but it doesn't have to. crbug.com/1292646 shows an example
1714 // where this isn't the case. Note, an inner object pointer won't work for
1715 // direct map, unless it is within the first partition page.
1716 template <bool thread_safe>
1717 PA_ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
1718 // malloc_usable_size() is expected to handle NULL gracefully and return 0.
1719 if (!ptr) {
1720 return 0;
1721 }
1722 auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
1723 auto* root = FromSlotSpan(slot_span);
1724 return slot_span->GetUsableSize(root);
1725 }
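// Usage sketch (illustrative): since nullptr is handled and the root is
// recovered from the pointer itself, a malloc_usable_size()-style shim can
// simply forward to this static function, e.g.
//
//   size_t UsableSize(void* ptr) {
//     return partition_alloc::ThreadSafePartitionRoot::GetUsableSize(ptr);
//   }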
1726
1727 template <bool thread_safe>
1728 PA_ALWAYS_INLINE size_t
1729 PartitionRoot<thread_safe>::GetUsableSizeWithMac11MallocSizeHack(void* ptr) {
1730 // malloc_usable_size() is expected to handle NULL gracefully and return 0.
1731 if (!ptr) {
1732 return 0;
1733 }
1734 auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
1735 auto* root = FromSlotSpan(slot_span);
1736 size_t usable_size = slot_span->GetUsableSize(root);
1737 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
1738 // Check |mac11_malloc_size_hack_enabled_| flag first as this doesn't
1739 // concern OS versions other than macOS 11.
1740 if (PA_UNLIKELY(root->flags.mac11_malloc_size_hack_enabled_ &&
1741 usable_size == internal::kMac11MallocSizeHackUsableSize)) {
1742 uintptr_t slot_start =
1743 internal::PartitionAllocGetSlotStartInBRPPool(UntagPtr(ptr));
1744 auto* ref_count = internal::PartitionRefCountPointer(slot_start);
1745 if (ref_count->NeedsMac11MallocSizeHack()) {
1746 return internal::kMac11MallocSizeHackRequestedSize;
1747 }
1748 }
1749 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
1750
1751 return usable_size;
1752 }
1753
1754 // Returns the page configuration to use when mapping slot spans for a given
1755 // partition root. ReadWriteTagged is used on MTE-enabled systems for
1756 // PartitionRoots supporting it.
1757 template <bool thread_safe>
1758 PA_ALWAYS_INLINE PageAccessibilityConfiguration
1759 PartitionRoot<thread_safe>::GetPageAccessibility() const {
1760 PageAccessibilityConfiguration::Permissions permissions =
1761 PageAccessibilityConfiguration::kReadWrite;
1762 #if PA_CONFIG(HAS_MEMORY_TAGGING)
1763 if (IsMemoryTaggingEnabled()) {
1764 permissions = PageAccessibilityConfiguration::kReadWriteTagged;
1765 }
1766 #endif
1767 #if BUILDFLAG(ENABLE_PKEYS)
1768 return PageAccessibilityConfiguration(permissions, flags.pkey);
1769 #else
1770 return PageAccessibilityConfiguration(permissions);
1771 #endif
1772 }
1773
1774 template <bool thread_safe>
1775 PA_ALWAYS_INLINE PageAccessibilityConfiguration
1776 PartitionRoot<thread_safe>::PageAccessibilityWithPkeyIfEnabled(
1777 PageAccessibilityConfiguration::Permissions permissions) const {
1778 #if BUILDFLAG(ENABLE_PKEYS)
1779 return PageAccessibilityConfiguration(permissions, flags.pkey);
1780 #endif
1781 return PageAccessibilityConfiguration(permissions);
1782 }
1783
1784 // Return the capacity of the underlying slot (adjusted for extras). This
1785 // doesn't mean this capacity is readily available. It merely means that if
1786 // a new allocation (or realloc) happened with that returned value, it'd use
1787 // the same amount of underlying memory.
1788 template <bool thread_safe>
1789 PA_ALWAYS_INLINE size_t
1790 PartitionRoot<thread_safe>::AllocationCapacityFromSlotStart(
1791 uintptr_t slot_start) const {
1792 auto* slot_span = SlotSpan::FromSlotStart(slot_start);
1793 return AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
1794 }
1795
1796 // static
1797 template <bool thread_safe>
1798 PA_ALWAYS_INLINE uint16_t PartitionRoot<thread_safe>::SizeToBucketIndex(
1799 size_t size,
1800 BucketDistribution bucket_distribution) {
1801 switch (bucket_distribution) {
1802 case BucketDistribution::kDefault:
1803 return internal::BucketIndexLookup::GetIndexForDefaultBuckets(size);
1804 case BucketDistribution::kDenser:
1805 return internal::BucketIndexLookup::GetIndexForDenserBuckets(size);
1806 }
1807 }
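// Note (descriptive, added for clarity): the switch above intentionally has no
// default case, so adding a new BucketDistribution value without handling it
// here should surface as a compiler warning (assuming -Wswitch or an
// equivalent diagnostic is enabled).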
1808
1809 template <bool thread_safe>
1810 PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlags(
1811 unsigned int flags,
1812 size_t requested_size,
1813 const char* type_name) {
1814 return AllocWithFlagsInternal(flags, requested_size,
1815 internal::PartitionPageSize(), type_name);
1816 }
1817
1818 template <bool thread_safe>
1819 PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsInternal(
1820 unsigned int flags,
1821 size_t requested_size,
1822 size_t slot_span_alignment,
1823 const char* type_name) {
1824 PA_DCHECK(
1825 (slot_span_alignment >= internal::PartitionPageSize()) &&
1826 partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
1827
1828 PA_DCHECK(flags < AllocFlags::kLastFlag << 1);
1829 PA_DCHECK((flags & AllocFlags::kNoHooks) == 0); // Internal only.
1830 PA_DCHECK(initialized);
1831
1832 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1833 if (!(flags & AllocFlags::kNoMemoryToolOverride)) {
1834 CHECK_MAX_SIZE_OR_RETURN_NULLPTR(requested_size, flags);
1835 const bool zero_fill = flags & AllocFlags::kZeroFill;
1836 void* result =
1837 zero_fill ? calloc(1, requested_size) : malloc(requested_size);
1838 PA_CHECK(result || flags & AllocFlags::kReturnNull);
1839 return result;
1840 }
1841 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1842 void* object = nullptr;
1843 const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
1844 if (PA_UNLIKELY(hooks_enabled)) {
1845 if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
1846 &object, flags, requested_size, type_name)) {
1847 PartitionAllocHooks::AllocationObserverHookIfEnabled(
1848 object, requested_size, type_name);
1849 return object;
1850 }
1851 }
1852
1853 object = AllocWithFlagsNoHooks(flags, requested_size, slot_span_alignment);
1854
1855 if (PA_UNLIKELY(hooks_enabled)) {
1856 PartitionAllocHooks::AllocationObserverHookIfEnabled(object, requested_size,
1857 type_name);
1858 }
1859
1860 return object;
1861 }
1862
1863 template <bool thread_safe>
1864 PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
1865 unsigned int flags,
1866 size_t requested_size,
1867 size_t slot_span_alignment) {
1868 PA_DCHECK(
1869 (slot_span_alignment >= internal::PartitionPageSize()) &&
1870 partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
1871
1872 // The thread cache is added "in the middle" of the main allocator, that is:
1873 // - After all the cookie/ref-count management
1874 // - Before the "raw" allocator.
1875 //
1876 // That is, the general allocation flow is:
1877 // 1. Adjustment of requested size to make room for extras
1878 // 2. Allocation:
1879 // a. Call to the thread cache, if it succeeds, go to step 3.
1880 // b. Otherwise, call the "raw" allocator <-- Locking
1881 // 3. Handle cookie/ref-count, zero allocation if required
1882
1883 size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
1884 PA_CHECK(raw_size >= requested_size); // check for overflows
1885
1886 // We should only call |SizeToBucketIndex| at most once when allocating.
1887 // Otherwise, we risk having |bucket_distribution| changed
1888 // underneath us (between calls to |SizeToBucketIndex| during the same call),
1889 // which would result in an inconsistent state.
1890 uint16_t bucket_index =
1891 SizeToBucketIndex(raw_size, this->GetBucketDistribution());
1892 size_t usable_size;
1893 bool is_already_zeroed = false;
1894 uintptr_t slot_start = 0;
1895 size_t slot_size;
1896
1897 #if BUILDFLAG(USE_STARSCAN)
1898 const bool is_quarantine_enabled = IsQuarantineEnabled();
1899 // PCScan safepoint. Call before trying to allocate from cache.
1900 // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
1901 // default.
1902 if (PA_UNLIKELY(is_quarantine_enabled)) {
1903 PCScan::JoinScanIfNeeded();
1904 }
1905 #endif // BUILDFLAG(USE_STARSCAN)
1906
1907 auto* thread_cache = GetOrCreateThreadCache();
1908
1909 // Don't use thread cache if higher order alignment is requested, because the
1910 // thread cache will not be able to satisfy it.
1911 //
1912 // PA_LIKELY: performance-sensitive partitions use the thread cache.
1913 if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
1914 slot_span_alignment <= internal::PartitionPageSize())) {
1915 // Note: getting slot_size from the thread cache rather than by
1916 // `buckets[bucket_index].slot_size` to avoid touching `buckets` on the fast
1917 // path.
1918 slot_start = thread_cache->GetFromCache(bucket_index, &slot_size);
1919
1920 // PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
1921 if (PA_LIKELY(slot_start)) {
1922 // This follows the logic of SlotSpanMetadata::GetUsableSize for small
1923 // buckets, which is too expensive to call here.
1924 // Keep it in sync!
1925 usable_size = AdjustSizeForExtrasSubtract(slot_size);
1926
1927 #if BUILDFLAG(PA_DCHECK_IS_ON)
1928 // Make sure that the allocated pointer comes from the same place it would
1929 // for a non-thread cache allocation.
1930 SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
1931 PA_DCHECK(IsValidSlotSpan(slot_span));
1932 PA_DCHECK(slot_span->bucket == &bucket_at(bucket_index));
1933 PA_DCHECK(slot_span->bucket->slot_size == slot_size);
1934 PA_DCHECK(usable_size == slot_span->GetUsableSize(this));
1935 // All large allocations must go through the RawAlloc path to correctly
1936 // set |usable_size|.
1937 PA_DCHECK(!slot_span->CanStoreRawSize());
1938 PA_DCHECK(!slot_span->bucket->is_direct_mapped());
1939 #endif
1940 } else {
1941 slot_start =
1942 RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
1943 &usable_size, &is_already_zeroed);
1944 }
1945 } else {
1946 slot_start =
1947 RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
1948 &usable_size, &is_already_zeroed);
1949 }
1950
1951 if (PA_UNLIKELY(!slot_start)) {
1952 return nullptr;
1953 }
1954
1955 if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
1956 thread_cache->RecordAllocation(usable_size);
1957 }
1958
1959 // Layout inside the slot:
1960 // |[refcnt]|...object...|[empty]|[cookie]|[unused]|
1961 // <----(a)----->
1962 // <--------(b)--------->
1963 // <--(c)---> + <--(c)--->
1964 // <---------(d)---------> + <--(d)--->
1965 // <-----------------(e)------------------>
1966 // <----------------------(f)---------------------->
1967 // (a) requested_size
1968 // (b) usable_size
1969 // (c) extras
1970 // (d) raw_size
1971 // (e) utilized_slot_size
1972 // (f) slot_size
1973 // Notes:
1974 // - Ref-count may or may not exist in the slot, depending on brp_enabled().
1975 // - Cookie exists only in the BUILDFLAG(PA_DCHECK_IS_ON) case.
1976 // - Think of raw_size as the minimum size required internally to satisfy
1977 // the allocation request (i.e. requested_size + extras)
1978 // - Note, at most one "empty" or "unused" space can occur at a time. It
1979 // occurs when slot_size is larger than raw_size. "unused" applies only to
1980 // large allocations (direct-mapped and single-slot slot spans) and "empty"
1981 // only to small allocations.
1982 // Why either-or, one might ask? We make an effort to put the trailing
1983 // cookie as close to data as possible to catch overflows (often
1984 // off-by-one), but that's possible only if we have enough space in metadata
1985 // to save raw_size, i.e. only for large allocations. For small allocations,
1986 // we have no other choice than putting the cookie at the very end of the
1987 // slot, thus creating the "empty" space.
1988 //
1989 // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
1990 // |...object...|[empty]|[cookie]|[unused]|[refcnt]|
1991 // <----(a)----->
1992 // <--------(b)--------->
1993 // <--(c)---> + <--(c)--->
1994 // <----(d)-----> + <--(d)---> + <--(d)--->
1995 // <-------------(e)-------------> + <--(e)--->
1996 // <----------------------(f)---------------------->
1997 // Notes:
1998 // If |slot_start| is not SystemPageSize()-aligned (possible only for small
1999 // allocations), ref-count of this slot is stored at the end of the previous
2000 // slot. Otherwise it is stored in ref-count table placed after the super page
2001 // metadata. For simplicity, the space for ref-count is still reserved at the
2002 // end of previous slot, even though redundant.
2003
2004 void* object = SlotStartToObject(slot_start);
2005
2006 #if BUILDFLAG(PA_DCHECK_IS_ON)
2007 // Add the cookie after the allocation.
2008 if (this->flags.allow_cookie) {
2009 internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
2010 usable_size);
2011 }
2012 #endif
2013
2014 // Fill the region with kUninitializedByte (on debug builds, when zero-fill is
2015 // not requested) or with 0 (when requested and not already zeroed).
2016 bool zero_fill = flags & AllocFlags::kZeroFill;
2017 // PA_LIKELY: operator new() calls malloc(), not calloc().
2018 if (PA_LIKELY(!zero_fill)) {
2019 // memset() can be really expensive.
2020 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
2021 internal::DebugMemset(object, internal::kUninitializedByte, usable_size);
2022 #endif
2023 } else if (!is_already_zeroed) {
2024 memset(object, 0, usable_size);
2025 }
2026
2027 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
2028 // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
2029 // be false only for the aligned partition.
2030 if (brp_enabled()) {
2031 bool needs_mac11_malloc_size_hack = false;
2032 #if PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
2033 // Only apply hack to size 32 allocations on macOS 11. There is a buggy
2034 // assertion that malloc_size() equals sizeof(class_rw_t) which is 32.
2035 if (PA_UNLIKELY(this->flags.mac11_malloc_size_hack_enabled_ &&
2036 requested_size ==
2037 internal::kMac11MallocSizeHackRequestedSize)) {
2038 needs_mac11_malloc_size_hack = true;
2039 }
2040 #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
2041 auto* ref_count = new (internal::PartitionRefCountPointer(slot_start))
2042 internal::PartitionRefCount(needs_mac11_malloc_size_hack);
2043 #if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
2044 ref_count->SetRequestedSize(requested_size);
2045 #else
2046 (void)ref_count;
2047 #endif
2048 }
2049 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
2050
2051 #if BUILDFLAG(USE_STARSCAN)
2052 // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
2053 // default.
2054 if (PA_UNLIKELY(is_quarantine_enabled)) {
2055 if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
2056 // Mark the corresponding bits in the state bitmap as allocated.
2057 internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
2058 }
2059 }
2060 #endif // BUILDFLAG(USE_STARSCAN)
2061
2062 return object;
2063 }
2064
2065 template <bool thread_safe>
2066 PA_ALWAYS_INLINE uintptr_t
2067 PartitionRoot<thread_safe>::RawAlloc(Bucket* bucket,
2068 unsigned int flags,
2069 size_t raw_size,
2070 size_t slot_span_alignment,
2071 size_t* usable_size,
2072 bool* is_already_zeroed) {
2073 ::partition_alloc::internal::ScopedGuard guard{lock_};
2074 return AllocFromBucket(bucket, flags, raw_size, slot_span_alignment,
2075 usable_size, is_already_zeroed);
2076 }
2077
2078 template <bool thread_safe>
2079 PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
2080 unsigned int flags,
2081 size_t alignment,
2082 size_t requested_size) {
2083 // Aligned allocation support relies on the natural alignment guarantees of
2084 // PartitionAlloc. Specifically, it relies on the fact that slots within a
2085 // slot span are aligned to slot size, from the beginning of the span.
2086 //
2087 // For alignments <=PartitionPageSize(), the code below adjusts the request
2088 // size to be a power of two, no less than alignment. Since slot spans are
2089 // aligned to PartitionPageSize(), which is also a power of two, this will
2090 // automatically guarantee alignment on the adjusted size boundary, thanks to
2091 // the natural alignment described above.
2092 //
2093 // For alignments >PartitionPageSize(), we need to pass the request down the
2094 // stack to only give us a slot span aligned to this more restrictive
2095 // boundary. In the current implementation, this code path will always
2096 // allocate a new slot span and hand us the first slot, so no need to adjust
2097 // the request size. As a consequence, allocating many small objects with
2098 // such a high alignment can cause non-negligible fragmentation,
2099 // particularly if these allocations are back to back.
2100 // TODO(bartekn): We should check that this is not causing issues in practice.
2101 //
2102 // Extras before the allocation are forbidden as they shift the returned
2103 // allocation from the beginning of the slot, thus messing up alignment.
2104 // Extras after the allocation are acceptable, but they have to be taken into
2105 // account in the request size calculation to avoid crbug.com/1185484.
2106 PA_DCHECK(this->flags.allow_aligned_alloc);
2107 PA_DCHECK(!this->flags.extras_offset);
2108 // This is mandated by |posix_memalign()|, so should never fire.
2109 PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
2110 // Catch unsupported alignment requests early.
2111 PA_CHECK(alignment <= internal::kMaxSupportedAlignment);
2112 size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
2113
2114 size_t adjusted_size = requested_size;
2115 if (alignment <= internal::PartitionPageSize()) {
2116 // Handle cases such as size = 16, alignment = 64.
2117 // Wastes memory when a large alignment is requested with a small size, but
2118 // this is hard to avoid, and should not be too common.
2119 if (PA_UNLIKELY(raw_size < alignment)) {
2120 raw_size = alignment;
2121 } else {
2122 // PartitionAlloc only guarantees alignment for power-of-two sized
2123 // allocations. To make sure this applies here, round up the allocation
2124 // size.
2125 raw_size =
2126 static_cast<size_t>(1)
2127 << (int{sizeof(size_t) * 8} -
2128 partition_alloc::internal::base::bits::CountLeadingZeroBits(
2129 raw_size - 1));
2130 }
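    // Worked example (illustrative, 64-bit build): raw_size = 48 rounds up to
    // 1 << (64 - CountLeadingZeroBits(47)) = 1 << 6 = 64, the next power of
    // two, so the slot-size alignment guarantee covers the requested
    // alignment.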
2131 PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
2132 // Adjust back, because AllocWithFlagsNoHooks/Alloc will adjust it again.
2133 adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
2134
2135 // Overflow check. adjusted_size must be larger or equal to requested_size.
2136 if (PA_UNLIKELY(adjusted_size < requested_size)) {
2137 if (flags & AllocFlags::kReturnNull) {
2138 return nullptr;
2139 }
2140 // OutOfMemoryDeathTest.AlignedAlloc requires
2141 // base::TerminateBecauseOutOfMemory (invoked by
2142 // PartitionExcessiveAllocationSize).
2143 internal::PartitionExcessiveAllocationSize(requested_size);
2144 // internal::PartitionExcessiveAllocationSize(size) causes OOM_CRASH.
2145 PA_NOTREACHED();
2146 }
2147 }
2148
2149 // Slot spans are naturally aligned on partition page size, but make sure you
2150 // don't pass anything less, because it'll mess up the callee's calculations.
2151 size_t slot_span_alignment =
2152 std::max(alignment, internal::PartitionPageSize());
2153 bool no_hooks = flags & AllocFlags::kNoHooks;
2154 void* object =
2155 no_hooks
2156 ? AllocWithFlagsNoHooks(0, adjusted_size, slot_span_alignment)
2157 : AllocWithFlagsInternal(0, adjusted_size, slot_span_alignment, "");
2158
2159 // |alignment| is a power of two, but the compiler doesn't necessarily know
2160 // that. A regular % operation is very slow; make sure to use the equivalent,
2161 // faster form.
2162 // No need to MTE-untag, as it doesn't change alignment.
2163 PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
2164
2165 return object;
2166 }
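// Example (illustrative): requesting 16 bytes with 64-byte alignment falls
// into the "size = 16, alignment = 64" case mentioned above: the internal
// request is bumped to 64 bytes, so the returned object is 64-byte aligned at
// the cost of some internal fragmentation.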
2167
2168 template <bool thread_safe>
2169 PA_NOINLINE void* PartitionRoot<thread_safe>::Alloc(size_t requested_size,
2170 const char* type_name) {
2171 return AllocWithFlags(0, requested_size, type_name);
2172 }
2173
2174 template <bool thread_safe>
2175 PA_NOINLINE void* PartitionRoot<thread_safe>::Realloc(void* ptr,
2176 size_t new_size,
2177 const char* type_name) {
2178 return ReallocWithFlags(0, ptr, new_size, type_name);
2179 }
2180
2181 template <bool thread_safe>
2182 PA_NOINLINE void* PartitionRoot<thread_safe>::TryRealloc(
2183 void* ptr,
2184 size_t new_size,
2185 const char* type_name) {
2186 return ReallocWithFlags(AllocFlags::kReturnNull, ptr, new_size, type_name);
2187 }
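// Usage note (illustrative): TryRealloc() is the fallible variant of
// Realloc(). With AllocFlags::kReturnNull it returns nullptr on failure
// instead of terminating, and in that case the original allocation is
// presumably left untouched and still owned by the caller, e.g.
//
//   void* grown = root.TryRealloc(ptr, new_size, "");
//   if (!grown) {
//     // Keep using |ptr|; it must still be freed eventually.
//   }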
2188
2189 // Return the capacity of the underlying slot (adjusted for extras) that'd be
2190 // used to satisfy a request of |size|. This doesn't mean this capacity would be
2191 // readily available. It merely means that if an allocation happened with that
2192 // returned value, it'd use the same amount of underlying memory as the
2193 // allocation with |size|.
2194 template <bool thread_safe>
2195 PA_ALWAYS_INLINE size_t
2196 PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
2197 size_t size) const {
2198 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
2199 return size;
2200 #else
2201 PA_DCHECK(PartitionRoot<thread_safe>::initialized);
2202 size = AdjustSizeForExtrasAdd(size);
2203 auto& bucket = bucket_at(SizeToBucketIndex(size, GetBucketDistribution()));
2204 PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
2205 PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
2206
2207 if (PA_LIKELY(!bucket.is_direct_mapped())) {
2208 size = bucket.slot_size;
2209 } else if (size > internal::MaxDirectMapped()) {
2210 // Too large to allocate => return the size unchanged.
2211 } else {
2212 size = GetDirectMapSlotSize(size);
2213 }
2214 size = AdjustSizeForExtrasSubtract(size);
2215 return size;
2216 #endif
2217 }
2218
2219 template <bool thread_safe>
2220 ThreadCache* PartitionRoot<thread_safe>::GetOrCreateThreadCache() {
2221 ThreadCache* thread_cache = nullptr;
2222 if (PA_LIKELY(flags.with_thread_cache)) {
2223 thread_cache = ThreadCache::Get();
2224 if (PA_UNLIKELY(!ThreadCache::IsValid(thread_cache))) {
2225 thread_cache = MaybeInitThreadCache();
2226 }
2227 }
2228 return thread_cache;
2229 }
2230
2231 template <bool thread_safe>
2232 ThreadCache* PartitionRoot<thread_safe>::GetThreadCache() {
2233 return PA_LIKELY(flags.with_thread_cache) ? ThreadCache::Get() : nullptr;
2234 }
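// Note (descriptive, added for clarity): GetOrCreateThreadCache() may lazily
// set up a cache for the current thread and is used on the allocation path,
// while GetThreadCache() never creates one, so a free on a thread that never
// allocated simply bypasses the thread cache (callers still check
// ThreadCache::IsValid()).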
2235
2236 using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
2237
2238 static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
2239 internal::kPartitionCachelineSize,
2240 "Padding is incorrect");
2241
2242 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
2243 // Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
2244 using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
2245 #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
2246
2247 } // namespace partition_alloc
2248
2249 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
2250