1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
7 
8 #include <cstdint>
9 #include <cstring>
10 #include <limits>
11 #include <utility>
12 
13 #include "base/allocator/partition_allocator/address_pool_manager.h"
14 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
15 #include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
16 #include "base/allocator/partition_allocator/partition_address_space.h"
17 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
18 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
19 #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
20 #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
21 #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
22 #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
23 #include "base/allocator/partition_allocator/partition_alloc_check.h"
24 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
25 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
26 #include "base/allocator/partition_allocator/partition_bucket.h"
27 #include "base/allocator/partition_allocator/partition_freelist_entry.h"
28 #include "base/allocator/partition_allocator/reservation_offset_table.h"
29 #include "base/allocator/partition_allocator/tagging.h"
30 #include "build/build_config.h"
31 
32 #if BUILDFLAG(USE_STARSCAN)
33 #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
34 #endif
35 
36 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
37 #include "base/allocator/partition_allocator/partition_ref_count.h"
38 #endif
39 
40 namespace partition_alloc::internal {
41 
42 // An "extent" is a span of consecutive superpages. We link the partition's next
43 // extent (if there is one) to the very start of a superpage's metadata area.
44 template <bool thread_safe>
45 struct PartitionSuperPageExtentEntry {
46   PartitionRoot<thread_safe>* root;
47   PartitionSuperPageExtentEntry<thread_safe>* next;
48   uint16_t number_of_consecutive_super_pages;
49   uint16_t number_of_nonempty_slot_spans;
50 
51   PA_ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans();
52   PA_ALWAYS_INLINE void DecrementNumberOfNonemptySlotSpans();
53 };
54 static_assert(
55     sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
56     "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
57 static_assert(
58     kMaxSuperPagesInPool / kSuperPageSize <=
59         std::numeric_limits<
60             decltype(PartitionSuperPageExtentEntry<
61                      ThreadSafe>::number_of_consecutive_super_pages)>::max(),
62     "number_of_consecutive_super_pages must be big enough");
63 
64 // Returns the base of the first super page in the range of consecutive super
65 // pages.
66 //
67 // CAUTION! |extent| must point to the extent of the first super page in the
68 // range of consecutive super pages.
69 template <bool thread_safe>
70 PA_ALWAYS_INLINE uintptr_t SuperPagesBeginFromExtent(
71     const PartitionSuperPageExtentEntry<thread_safe>* extent) {
72   PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
73   uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
74   PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
75   return base::bits::AlignDown(extent_as_uintptr, kSuperPageAlignment);
76 }
77 
78 // Returns the end of the last super page in the range of consecutive super
79 // pages.
80 //
81 // CAUTION! |extent| must point to the extent of the first super page in the
82 // range of consecutive super pages.
83 template <bool thread_safe>
84 PA_ALWAYS_INLINE uintptr_t SuperPagesEndFromExtent(
85     const PartitionSuperPageExtentEntry<thread_safe>* extent) {
86   return SuperPagesBeginFromExtent(extent) +
87          (extent->number_of_consecutive_super_pages * kSuperPageSize);
88 }
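
// Editor's note: a minimal usage sketch, not part of the original header. It
// assumes |extent| points to the extent entry of the *first* super page in the
// consecutive range, as the CAUTION comments above require.
//
//   uintptr_t begin = SuperPagesBeginFromExtent(extent);
//   uintptr_t end = SuperPagesEndFromExtent(extent);
//   for (uintptr_t super_page = begin; super_page < end;
//        super_page += kSuperPageSize) {
//     // |super_page| is the base address of one super page in the extent.
//   }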
89 
90 #if BUILDFLAG(USE_STARSCAN)
91 using AllocationStateMap =
92     StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
93 #endif
94 
95 // Metadata of the slot span.
96 //
97 // Some notes on slot span states. It can be in one of four major states:
98 // 1) Active.
99 // 2) Full.
100 // 3) Empty.
101 // 4) Decommitted.
102 // An active slot span has available free slots, as well as allocated ones.
103 // A full slot span has no free slots. An empty slot span has no allocated
104 // slots, and a decommitted slot span is an empty one that had its backing
105 // memory released back to the system.
106 //
107 // There are three linked lists tracking slot spans. The "active" list is an
108 // approximation of a list of active slot spans. It is an approximation because
109 // full, empty and decommitted slot spans may briefly be present in the list
110 // until we next do a scan over it. The "empty" list holds mostly empty slot
111 // spans, but may briefly hold decommitted ones too. The "decommitted" list
112 // holds only decommitted slot spans.
113 //
114 // The significant slot span transitions are:
115 // - Free() will detect when a full slot span has a slot freed and immediately
116 //   return the slot span to the head of the active list.
117 // - Free() will detect when a slot span is fully emptied. It _may_ add it to
118 //   the empty list or it _may_ leave it on the active list until a future
119 //   list scan.
120 // - Alloc() _may_ scan the active page list in order to fulfil the request.
121 //   If it does this, full, empty and decommitted slot spans encountered will be
122 //   booted out of the active list. If there are no suitable active slot spans
123 //   found, an empty or decommitted slot span (if one exists) will be pulled
124 //   from the empty/decommitted list onto the active list.
125 #pragma pack(push, 1)
126 template <bool thread_safe>
127 struct SlotSpanMetadata {
128  private:
129   PartitionFreelistEntry* freelist_head = nullptr;
130 
131  public:
132   // TODO(lizeb): Make as many fields as possible private or const, to
133   // encapsulate things more clearly.
134   SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
135   PartitionBucket<thread_safe>* const bucket = nullptr;
136 
137   // CHECK()ed in AllocNewSlotSpan().
138 #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
139   // System page size is not a constant on Apple OSes, but is either 4 or 16kiB
140   // (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
141   // PartitionPageSize() is 4 times the OS page size.
142   static constexpr size_t kMaxSlotsPerSlotSpan =
143       4 * (1 << 14) / kSmallestBucket;
144 #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
145   // System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
146   // currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code,
147   // so we use the 16 kiB maximum (64 kiB will crash).
148   static constexpr size_t kMaxSlotsPerSlotSpan =
149       4 * (1 << 14) / kSmallestBucket;
150 #else
151   // A slot span can "span" multiple PartitionPages, but then its slot size is
152   // larger, so it doesn't have as many slots.
153   static constexpr size_t kMaxSlotsPerSlotSpan =
154       PartitionPageSize() / kSmallestBucket;
155 #endif  // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
156   // The maximum number of bits needed to cover all currently supported OSes.
157   static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
158   static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
159 
160   // |marked_full| isn't equivalent to being full. Slot span is marked as full
161   // iff it isn't on the active slot span list (or any other list).
162   uint32_t marked_full : 1;
163   // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
164   // be further differentiated by checking existence of the freelist.
165   uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
166   uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;
167 
168  private:
169   const uint32_t can_store_raw_size_ : 1;
170   uint32_t freelist_is_sorted_ : 1;
171   uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
172   // |empty_cache_index_| is only meaningful when |in_empty_cache_| == 1;
173   // otherwise it is undefined and mustn't be used.
174   uint16_t in_empty_cache_ : 1;
175   uint16_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
176   uint16_t unused2_ : (16 - 1 - kEmptyCacheIndexBits);
177   // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
178   // in PartitionPage which has 2B worth of fields and must fit in 32B.
179 
180  public:
181   PA_COMPONENT_EXPORT(PARTITION_ALLOC)
182   explicit SlotSpanMetadata(PartitionBucket<thread_safe>* bucket);
183 
184   // Public API
185   // Note the matching Alloc() functions are in PartitionPage.
186   PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
187       size_t number_of_freed);
188   PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
189   PA_ALWAYS_INLINE void Free(uintptr_t ptr);
190   // Appends the passed freelist to the slot-span's freelist. Please note that
191   // the function doesn't increment the tags of the passed freelist entries,
192   // since FreeNoHooks() did it already.
193   PA_ALWAYS_INLINE void AppendFreeList(PartitionFreelistEntry* head,
194                                        PartitionFreelistEntry* tail,
195                                        size_t number_of_freed);
196 
197   void Decommit(PartitionRoot<thread_safe>* root);
198   void DecommitIfPossible(PartitionRoot<thread_safe>* root);
199 
200   // Sorts the freelist in ascending address order.
201   void SortFreelist();
202   // Inserts the slot span into the empty ring, making space for the new slot
203   // span, and potentially shrinking the ring.
204   void RegisterEmpty();
205 
206   // Pointer/address manipulation functions. These must be static as the input
207   // |slot_span| pointer may be the result of an offset calculation and
208   // therefore cannot be trusted. The objective of these functions is to
209   // sanitize this input.
210   PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
211       const SlotSpanMetadata* slot_span);
212   PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
213   PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
214   PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
215   PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
216       uintptr_t address);
217   PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);
218 
219   PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
220   ToSuperPageExtent() const;
221 
222   // Checks if it is feasible to store raw_size.
223   PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
224   // The caller is responsible for ensuring that raw_size can be stored before
225   // calling Set/GetRawSize.
226   PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
227   PA_ALWAYS_INLINE size_t GetRawSize() const;
228 
229   PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
230     return freelist_head;
231   }
232   PA_ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);
233 
234   // Returns the size of the region used within a slot. The used region
235   // comprises allocated data, extras, and possibly empty space in the middle.
236   PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
237     // The returned size can be:
238     // - The slot size for small buckets.
239     // - Exact size needed to satisfy allocation (incl. extras), for large
240     //   buckets and direct-mapped allocations (see also the comment in
241     //   CanStoreRawSize() for more info).
242     if (PA_LIKELY(!CanStoreRawSize())) {
243       return bucket->slot_size;
244     }
245     return GetRawSize();
246   }
247 
248   // This includes padding due to rounding done at allocation; we don't know the
249   // requested size at deallocation, so we use this in both places.
250   PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
251     // This could be more precise for allocations where CanStoreRawSize()
252     // returns true (large allocations). However this is called for *every*
253     // allocation, so we don't want an extra branch there.
254     return bucket->slot_size;
255   }
256 
257   // Returns the size available to the app. It can be equal or higher than the
258   // requested size. If higher, the overage won't exceed what's actually usable
259   // by the app without a risk of running out of an allocated region or into
260   // PartitionAlloc's internal data (like extras).
261   PA_ALWAYS_INLINE size_t
262   GetUsableSize(PartitionRoot<thread_safe>* root) const {
263     // The returned size can be:
264     // - The slot size minus extras, for small buckets. This could be more than
265     //   requested size.
266     // - Raw size minus extras, for large buckets and direct-mapped allocations
267     //   (see also the comment in CanStoreRawSize() for more info). This is
268     //   equal to requested size.
269     return root->AdjustSizeForExtrasSubtract(GetUtilizedSlotSize());
270   }
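
  // Editor's note: an illustrative sketch, not part of the original header. It
  // assumes |slot_span| belongs to |root|. Since the usable size is derived by
  // subtracting extras from the utilized size, it can never exceed it:
  //
  //   size_t utilized = slot_span->GetUtilizedSlotSize();
  //   size_t usable = slot_span->GetUsableSize(root);
  //   PA_DCHECK(usable <= utilized);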
271 
272   // Returns the total size of the slots that are currently provisioned.
273   PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
274     size_t num_provisioned_slots =
275         bucket->get_slots_per_span() - num_unprovisioned_slots;
276     size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
277     PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
278     return provisioned_size;
279   }
280 
281   // Returns the number of entries in the freelist.
282   size_t GetFreelistLength() const {
283     size_t num_provisioned_slots =
284         bucket->get_slots_per_span() - num_unprovisioned_slots;
285     return num_provisioned_slots - num_allocated_slots;
286   }
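
  // Editor's note: an illustrative invariant, not part of the original header,
  // tying the counters above together for any slot span:
  //
  //   bucket->get_slots_per_span() ==
  //       num_allocated_slots + GetFreelistLength() + num_unprovisioned_slots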
287 
288   PA_ALWAYS_INLINE void Reset();
289 
290   // TODO(ajwong): Can this be made private?  https://crbug.com/787153
291   PA_COMPONENT_EXPORT(PARTITION_ALLOC)
292   static const SlotSpanMetadata* get_sentinel_slot_span();
293   // The sentinel is not supposed to be modified and hence we mark it as const
294   // under the hood. However, we often store it together with mutable metadata
295   // objects and need a non-const pointer.
296   // You can use this function for this case, but you need to ensure that the
297   // returned object will not be written to.
298   static SlotSpanMetadata* get_sentinel_slot_span_non_const();
299 
300   // Slot span state getters.
301   PA_ALWAYS_INLINE bool is_active() const;
302   PA_ALWAYS_INLINE bool is_full() const;
303   PA_ALWAYS_INLINE bool is_empty() const;
304   PA_ALWAYS_INLINE bool is_decommitted() const;
305   PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
306   PA_ALWAYS_INLINE bool freelist_is_sorted() const {
307     return freelist_is_sorted_;
308   }
309   PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }
310 
311  private:
312   // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
313   // span in the active list. We could use nullptr, but in that case we need to
314   // add a null-check branch to the hot allocation path. We want to avoid that.
315   //
316   // Note, this declaration is kept in the header as opposed to an anonymous
317   // namespace so the getter can be fully inlined.
318   static const SlotSpanMetadata sentinel_slot_span_;
319   // For the sentinel.
320   constexpr SlotSpanMetadata() noexcept
321       : marked_full(0),
322         num_allocated_slots(0),
323         num_unprovisioned_slots(0),
324         can_store_raw_size_(false),
325         freelist_is_sorted_(true),
326         unused1_(0),
327         in_empty_cache_(0),
328         empty_cache_index_(0),
329         unused2_(0) {}
330 };
331 #pragma pack(pop)
332 static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
333               "SlotSpanMetadata must fit into a Page Metadata slot.");
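
// Editor's note: an illustrative sketch, not part of the original header,
// mapping the four states described above onto the state getters, assuming
// |slot_span| is a valid, non-sentinel slot span:
//
//   if (slot_span->is_full()) {
//     // All slots allocated; taken off the active list once marked full.
//   } else if (slot_span->is_active()) {
//     // Mix of allocated and free (or unprovisioned) slots.
//   } else if (slot_span->is_empty()) {
//     // No allocated slots, but memory is still committed (freelist exists).
//   } else {
//     PA_DCHECK(slot_span->is_decommitted());
//     // No allocated slots and backing memory released back to the system.
//   }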
334 
335 // Metadata of a non-first partition page in a slot span.
336 struct SubsequentPageMetadata {
337   // Raw size is the size needed to satisfy the allocation (requested size +
338   // extras). If available, it can be used to report better statistics or to
339   // bring protective cookie closer to the allocated memory.
340   //
341   // It can be used only if:
342   // - there is no more than one slot in the slot span (otherwise we wouldn't
343   //   know which slot the raw size applies to)
344   // - there is more than one partition page in the slot span (the metadata of
345   //   the first one is used to store slot information, but the second one is
346   //   available for extra information)
347   size_t raw_size;
348 };
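
// Editor's note: an illustrative sketch, not part of the original header.
// Callers are expected to guard on SlotSpanMetadata::CanStoreRawSize() before
// touching the raw size; |requested_plus_extras| is a hypothetical name:
//
//   if (slot_span->CanStoreRawSize())
//     slot_span->SetRawSize(requested_plus_extras);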
349 
350 // Each partition page has metadata associated with it. The metadata of the
351 // first page of a slot span describes that slot span. If a slot span spans
352 // more than 1 page, the page metadata may contain rudimentary additional
353 // information.
354 // "Pack" the union so that common page metadata still fits within
355 // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
356 #pragma pack(push, 1)
357 template <bool thread_safe>
358 struct PartitionPage {
359   union {
360     SlotSpanMetadata<thread_safe> slot_span_metadata;
361 
362     SubsequentPageMetadata subsequent_page_metadata;
363 
364     // sizeof(PartitionPage) must always be:
365     // - a power of 2 (for fast modulo operations)
366     // - below kPageMetadataSize
367     //
368     // This makes sure that this is respected no matter the architecture.
369     char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
370   };
371 
372   // The first PartitionPage of the slot span holds its metadata. This offset
373   // tells how many pages in from that first page we are.
374   // For direct maps, the first page metadata (that isn't super page extent
375   // entry) uses this field to tell how many pages to the right the direct map
376   // metadata starts.
377   //
378   // 6 bits is enough to represent all possible offsets, given that the smallest
379   // partition page is 16kiB and the offset won't exceed 1MiB.
380   static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
381   static constexpr uint16_t kMaxSlotSpanMetadataOffset =
382       (1 << kMaxSlotSpanMetadataBits) - 1;
383   uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;
384 
385   // |is_valid| tells whether the page is part of a slot span. If |false|,
386   // |has_valid_span_after_this| tells whether it's an unused region in between
387   // slot spans within the super page.
388   // Note, |is_valid| has been added for clarity, but if we ever need to save
389   // this bit, it can be inferred from:
390   //   |!slot_span_metadata_offset && slot_span_metadata->bucket|.
391   bool is_valid : 1;
392   bool has_valid_span_after_this : 1;
393   uint8_t unused;
394 
395   PA_ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
396 };
397 #pragma pack(pop)
398 static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
399               "PartitionPage must be able to fit in a metadata slot");
400 
401 // Certain functions rely on PartitionPage being either SlotSpanMetadata or
402 // SubsequentPageMetadata, and therefore freely casting between each other.
403 static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
404 static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
405                   0,
406               "");
407 
408 template <bool thread_safe>
409 PA_ALWAYS_INLINE PartitionPage<thread_safe>* PartitionSuperPageToMetadataArea(
410     uintptr_t super_page) {
411   // This can't be just any super page, but it has to be the first super page of
412   // the reservation, as we assume here that the metadata is near its beginning.
413   PA_DCHECK(IsReservationStart(super_page));
414   PA_DCHECK(!(super_page & kSuperPageOffsetMask));
415   // The metadata area is exactly one system page (the guard page) into the
416   // super page.
417   return reinterpret_cast<PartitionPage<thread_safe>*>(super_page +
418                                                        SystemPageSize());
419 }
420 
421 PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
422     const PartitionPage<ThreadSafe>* page) {
423   return &(page + 1)->subsequent_page_metadata;
424 }
425 
426 PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
427     PartitionPage<ThreadSafe>* page) {
428   return &(page + 1)->subsequent_page_metadata;
429 }
430 
431 template <bool thread_safe>
432 PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
433 PartitionSuperPageToExtent(uintptr_t super_page) {
434   // The very first entry of the metadata is the super page extent entry.
435   return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
436       PartitionSuperPageToMetadataArea<thread_safe>(super_page));
437 }
438 
439 #if BUILDFLAG(USE_STARSCAN)
440 
441 // Size that should be reserved for state bitmap (if present) inside a super
442 // page. Elements of a super page are partition-page-aligned, hence the returned
443 // size is a multiple of partition page size.
444 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
445 ReservedStateBitmapSize() {
446   return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
447 }
448 
449 // Size that should be committed for state bitmap (if present) inside a super
450 // page. It is a multiple of system page size.
451 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
452 CommittedStateBitmapSize() {
453   return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
454 }
455 
456 // Returns the address/pointer to the state bitmap in the super page. It's the
457 // caller's responsibility to ensure that the bitmaps even exist.
458 PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
459   PA_DCHECK(!(super_page % kSuperPageAlignment));
460   return super_page + PartitionPageSize() +
461          (IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
462                                                : 0);
463 }
464 
465 PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
466     uintptr_t super_page) {
467   return reinterpret_cast<AllocationStateMap*>(
468       SuperPageStateBitmapAddr(super_page));
469 }
470 
471 #else  // BUILDFLAG(USE_STARSCAN)
472 
473 PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
474 ReservedStateBitmapSize() {
475   return 0ull;
476 }
477 
478 #endif  // BUILDFLAG(USE_STARSCAN)
479 
480 PA_ALWAYS_INLINE uintptr_t
481 SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
482                             bool with_quarantine) {
483   return PartitionPageSize() +
484          (is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
485          (with_quarantine ? ReservedStateBitmapSize() : 0);
486 }
487 
488 PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
489                                                  bool with_quarantine) {
490   PA_DCHECK(!(super_page % kSuperPageAlignment));
491   return super_page +
492          SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
493                                      with_quarantine);
494 }
495 
496 PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
497   return kSuperPageSize - PartitionPageSize();
498 }
499 
500 PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
501   PA_DCHECK(!(super_page % kSuperPageAlignment));
502   return super_page + SuperPagePayloadEndOffset();
503 }
504 
505 PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
506                                              bool with_quarantine) {
507   return SuperPagePayloadEnd(super_page) -
508          SuperPagePayloadBegin(super_page, with_quarantine);
509 }
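
// Editor's note (illustration, not part of the original header): for a
// normal-bucket super page without quarantine, the helpers above reduce the
// payload to the half-open range
//
//   [super_page + PartitionPageSize() + ReservedFreeSlotBitmapSize(),
//    super_page + kSuperPageSize - PartitionPageSize())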
510 
511 template <bool thread_safe>
512 PA_ALWAYS_INLINE void PartitionSuperPageExtentEntry<
513     thread_safe>::IncrementNumberOfNonemptySlotSpans() {
514 #if BUILDFLAG(PA_DCHECK_IS_ON)
515   uintptr_t super_page = base::bits::AlignDown(
516       reinterpret_cast<uintptr_t>(this), kSuperPageAlignment);
517   PA_DCHECK((SuperPagePayloadSize(super_page, root->IsQuarantineAllowed()) /
518              PartitionPageSize()) > number_of_nonempty_slot_spans);
519 #endif
520   ++number_of_nonempty_slot_spans;
521 }
522 
523 template <bool thread_safe>
524 PA_ALWAYS_INLINE void PartitionSuperPageExtentEntry<
525     thread_safe>::DecrementNumberOfNonemptySlotSpans() {
526   PA_DCHECK(number_of_nonempty_slot_spans);
527   --number_of_nonempty_slot_spans;
528 }
529 
530 template <bool thread_safe>
531 PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
532 SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
533   uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
534   return PartitionSuperPageToExtent<thread_safe>(super_page);
535 }
536 
537 // Returns whether the pointer lies within the super page's payload area (i.e.
538 // area devoted to slot spans). It doesn't check whether it's within a valid
539 // slot span. It merely ensures it doesn't fall in a meta-data region that would
540 // surely never contain user data.
541 PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
542                                                bool with_quarantine) {
543   // Quarantine can only be enabled for normal buckets in the current code.
544   PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
545   uintptr_t super_page = address & kSuperPageBaseMask;
546   uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
547   uintptr_t payload_end = SuperPagePayloadEnd(super_page);
548   return address >= payload_start && address < payload_end;
549 }
550 
551 // Converts from an address inside a super page into a pointer to the
552 // PartitionPage object (within the super page's metadata) that describes the
553 // partition page where |address| is located. |address| doesn't have to be
554 // located within a valid (i.e. allocated) slot span, but must be within the
555 // super page's payload area (i.e. area devoted to slot spans).
556 //
557 // While it is generally valid for |ptr| to be in the middle of an allocation,
558 // care has to be taken with direct maps that span multiple super pages. This
559 // function's behavior is undefined if |ptr| lies in a subsequent super page.
560 template <bool thread_safe>
561 PA_ALWAYS_INLINE PartitionPage<thread_safe>*
562 PartitionPage<thread_safe>::FromAddr(uintptr_t address) {
563   uintptr_t super_page = address & kSuperPageBaseMask;
564 
565 #if BUILDFLAG(PA_DCHECK_IS_ON)
566   PA_DCHECK(IsReservationStart(super_page));
567   auto* extent = PartitionSuperPageToExtent<thread_safe>(super_page);
568   PA_DCHECK(IsWithinSuperPagePayload(address,
569                                      IsManagedByNormalBuckets(address) &&
570                                          extent->root->IsQuarantineAllowed()));
571 #endif
572 
573   uintptr_t partition_page_index =
574       (address & kSuperPageOffsetMask) >> PartitionPageShift();
575   // Index 0 is invalid because it is the super page extent metadata and the
576   // last index is invalid because the whole PartitionPage is set as guard
577   // pages. This repeats part of the payload PA_DCHECK above, which also checks
578   // for other exclusions.
579   PA_DCHECK(partition_page_index);
580   PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
581   return PartitionSuperPageToMetadataArea<thread_safe>(super_page) +
582          partition_page_index;
583 }
584 
585 // Converts from a pointer to the SlotSpanMetadata object (within a super
586 // page's metadata) into a pointer to the beginning of the slot span. This
587 // works on direct maps too.
588 template <bool thread_safe>
589 PA_ALWAYS_INLINE uintptr_t SlotSpanMetadata<thread_safe>::ToSlotSpanStart(
590     const SlotSpanMetadata* slot_span) {
591   uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
592   uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
593 
594   // A valid |page| must be past the first guard System page and within
595   // the following metadata region.
596   PA_DCHECK(super_page_offset > SystemPageSize());
597   // Must be less than total metadata region.
598   PA_DCHECK(super_page_offset <
599             SystemPageSize() +
600                 (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
601   uintptr_t partition_page_index =
602       (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
603   // Index 0 is invalid because it is the super page extent metadata and the
604   // last index is invalid because the whole PartitionPage is set as guard
605   // pages.
606   PA_DCHECK(partition_page_index);
607   PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
608   uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
609   return super_page_base + (partition_page_index << PartitionPageShift());
610 }
611 
612 // Converts an address inside a slot span into a pointer to the SlotSpanMetadata
613 // object (within the super page's metadata) that describes the slot span
614 // containing that slot.
615 //
616 // CAUTION! For direct-mapped allocation, |address| has to be within the first
617 // partition page.
618 template <bool thread_safe>
619 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
620 SlotSpanMetadata<thread_safe>::FromAddr(uintptr_t address) {
621   auto* page = PartitionPage<thread_safe>::FromAddr(address);
622   PA_DCHECK(page->is_valid);
623   // Partition pages in the same slot span share the same SlotSpanMetadata
624   // object (located in the first PartitionPage object of that span). Adjust
625   // for that.
626   page -= page->slot_span_metadata_offset;
627   PA_DCHECK(page->is_valid);
628   PA_DCHECK(!page->slot_span_metadata_offset);
629   auto* slot_span = &page->slot_span_metadata;
630   // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
631   PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
632   // For direct map, if |address| doesn't point within the first partition page,
633   // |slot_span_metadata_offset| will be 0, |page| won't get shifted, leaving
634   // |slot_size| at 0.
635   PA_DCHECK(slot_span->bucket->slot_size);
636   return slot_span;
637 }
638 
639 // Like |FromAddr|, but asserts that |slot_start| indeed points to the
640 // beginning of a slot. It doesn't check if the slot is actually allocated.
641 //
642 // This works on direct maps too.
643 template <bool thread_safe>
644 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
645 SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
646   auto* slot_span = FromAddr(slot_start);
647 #if BUILDFLAG(PA_DCHECK_IS_ON)
648   // Checks that the pointer is a multiple of slot size.
649   uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
650   PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
651 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
652   return slot_span;
653 }
654 
655 // Like |FromAddr|, but asserts that |object| indeed points to the beginning of
656 // an object. It doesn't check if the object is actually allocated.
657 //
658 // This works on direct maps too.
659 template <bool thread_safe>
660 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
661 SlotSpanMetadata<thread_safe>::FromObject(void* object) {
662   uintptr_t object_addr = ObjectPtr2Addr(object);
663   auto* slot_span = FromAddr(object_addr);
664 #if BUILDFLAG(PA_DCHECK_IS_ON)
665   // Checks that the object is exactly |extras_offset| away from a multiple of
666   // slot size (i.e. from a slot start).
667   uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
668   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
669   PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
670             root->flags.extras_offset);
671 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
672   return slot_span;
673 }
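
// Editor's note: a typical lookup chain, shown as an illustrative sketch (not
// part of the original header), assuming |object| was allocated from a
// thread-safe PartitionRoot:
//
//   auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromObject(object);
//   auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
//   size_t usable_size = slot_span->GetUsableSize(root);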
674 
675 // Like |FromAddr|, but asserts that |address| indeed points within an object.
676 // It doesn't check if the object is actually allocated.
677 //
678 // CAUTION! For direct-mapped allocation, |address| has to be within the first
679 // partition page.
680 template <bool thread_safe>
681 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
682 SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
683   auto* slot_span = FromAddr(address);
684 #if BUILDFLAG(PA_DCHECK_IS_ON)
685   // Checks that the address is within the expected object boundaries.
686   uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
687   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
688   uintptr_t shift_from_slot_start =
689       (address - slot_span_start) % slot_span->bucket->slot_size;
690   PA_DCHECK(shift_from_slot_start >= root->flags.extras_offset);
691   // Use <= to allow an address immediately past the object.
692   PA_DCHECK(shift_from_slot_start <=
693             root->flags.extras_offset + slot_span->GetUsableSize(root));
694 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
695   return slot_span;
696 }
697 template <bool thread_safe>
698 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
699 SlotSpanMetadata<thread_safe>::FromObjectInnerPtr(void* ptr) {
700   return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
701 }
702 
703 template <bool thread_safe>
704 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(
705     size_t raw_size) {
706   PA_DCHECK(CanStoreRawSize());
707   auto* subsequent_page_metadata = GetSubsequentPageMetadata(
708       reinterpret_cast<PartitionPage<thread_safe>*>(this));
709   subsequent_page_metadata->raw_size = raw_size;
710 }
711 
712 template <bool thread_safe>
713 PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
714   PA_DCHECK(CanStoreRawSize());
715   const auto* subsequent_page_metadata = GetSubsequentPageMetadata(
716       reinterpret_cast<const PartitionPage<thread_safe>*>(this));
717   return subsequent_page_metadata->raw_size;
718 }
719 
720 template <bool thread_safe>
721 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
722     PartitionFreelistEntry* new_head) {
723 #if BUILDFLAG(PA_DCHECK_IS_ON)
724   // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
725   // as well.
726   uintptr_t new_head_untagged = UntagPtr(new_head);
727   PA_DCHECK(!new_head ||
728             (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
729                 (new_head_untagged & kSuperPageBaseMask));
730 #endif
731   freelist_head = new_head;
732   // Inserted something new in the freelist, assume that it is not sorted
733   // anymore.
734   freelist_is_sorted_ = false;
735 }
736 
737 template <bool thread_safe>
738 PA_ALWAYS_INLINE PartitionFreelistEntry*
739 SlotSpanMetadata<thread_safe>::PopForAlloc(size_t size) {
740   // Not using bucket->slot_size directly as the compiler doesn't know that
741   // |bucket->slot_size| is the same as |size|.
742   PA_DCHECK(size == bucket->slot_size);
743   PartitionFreelistEntry* result = freelist_head;
744   // Not setting freelist_is_sorted_ to false since this doesn't destroy
745   // ordering.
746   freelist_head = freelist_head->GetNext(size);
747   num_allocated_slots++;
748   return result;
749 }
750 
751 template <bool thread_safe>
752 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(uintptr_t slot_start)
753     PA_EXCLUSIVE_LOCKS_REQUIRED(
754         PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
755 #if BUILDFLAG(PA_DCHECK_IS_ON)
756   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
757   root->lock_.AssertAcquired();
758 #endif
759 
760   auto* entry = static_cast<internal::PartitionFreelistEntry*>(
761       SlotStartAddr2Ptr(slot_start));
762   // Catches an immediate double free.
763   PA_CHECK(entry != freelist_head);
764   // Look for double free one level deeper in debug.
765   PA_DCHECK(!freelist_head ||
766             entry != freelist_head->GetNext(bucket->slot_size));
767   entry->SetNext(freelist_head);
768   SetFreelistHead(entry);
769   // A best effort double-free check. Works only on empty slot spans.
770   PA_CHECK(num_allocated_slots);
771   --num_allocated_slots;
772   // If the span is marked full, or became empty, take the slow path to update
773   // internal state.
774   if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
775     FreeSlowPath(1);
776   } else {
777     // All single-slot allocations must go through the slow path to
778     // correctly update the raw size.
779     PA_DCHECK(!CanStoreRawSize());
780   }
781 }
782 
783 template <bool thread_safe>
784 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
785     PartitionFreelistEntry* head,
786     PartitionFreelistEntry* tail,
787     size_t number_of_freed)
788     PA_EXCLUSIVE_LOCKS_REQUIRED(
789         PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
790 #if BUILDFLAG(PA_DCHECK_IS_ON)
791   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
792   root->lock_.AssertAcquired();
793   PA_DCHECK(!tail->GetNext(bucket->slot_size));
794   PA_DCHECK(number_of_freed);
795   PA_DCHECK(num_allocated_slots);
796   if (CanStoreRawSize()) {
797     PA_DCHECK(number_of_freed == 1);
798   }
799   {
800     size_t number_of_entries = 0;
801     for (auto* entry = head; entry;
802          entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
803       uintptr_t untagged_entry = UntagPtr(entry);
804       // Check that all entries belong to this slot span.
805       PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
806       PA_DCHECK(untagged_entry <
807                 ToSlotSpanStart(this) + bucket->get_bytes_per_span());
808     }
809     PA_DCHECK(number_of_entries == number_of_freed);
810   }
811 #endif
812 
813   tail->SetNext(freelist_head);
814   SetFreelistHead(head);
815   PA_DCHECK(num_allocated_slots >= number_of_freed);
816   num_allocated_slots -= number_of_freed;
817   // If the span is marked full, or became empty, take the slow path to update
818   // internal state.
819   if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
820     FreeSlowPath(number_of_freed);
821   } else {
822     // All single-slot allocations must go through the slow path to
823     // correctly update the raw size.
824     PA_DCHECK(!CanStoreRawSize());
825   }
826 }
827 
828 template <bool thread_safe>
829 PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
830   PA_DCHECK(this != get_sentinel_slot_span());
831   bool ret =
832       (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
833   if (ret) {
834     PA_DCHECK(!marked_full);
835     PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
836   }
837   return ret;
838 }
839 
840 template <bool thread_safe>
841 PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
842   PA_DCHECK(this != get_sentinel_slot_span());
843   bool ret = (num_allocated_slots == bucket->get_slots_per_span());
844   if (ret) {
845     PA_DCHECK(!freelist_head);
846     PA_DCHECK(!num_unprovisioned_slots);
847     // May or may not be marked full, so don't check for that.
848   }
849   return ret;
850 }
851 
852 template <bool thread_safe>
853 PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
854   PA_DCHECK(this != get_sentinel_slot_span());
855   bool ret = (!num_allocated_slots && freelist_head);
856   if (ret) {
857     PA_DCHECK(!marked_full);
858   }
859   return ret;
860 }
861 
862 template <bool thread_safe>
863 PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
864   PA_DCHECK(this != get_sentinel_slot_span());
865   bool ret = (!num_allocated_slots && !freelist_head);
866   if (ret) {
867     PA_DCHECK(!marked_full);
868     PA_DCHECK(!num_unprovisioned_slots);
869     PA_DCHECK(!in_empty_cache_);
870   }
871   return ret;
872 }
873 
874 template <bool thread_safe>
875 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
876   PA_DCHECK(is_decommitted());
877 
878   num_unprovisioned_slots = bucket->get_slots_per_span();
879   PA_DCHECK(num_unprovisioned_slots);
880 
881   ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
882 
883   next_slot_span = nullptr;
884 }
885 
886 #if BUILDFLAG(USE_STARSCAN)
887 // Returns the state bitmap from an address within a normal-bucket super page.
888 // It's the caller's responsibility to ensure that the bitmap exists.
889 PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
890   PA_DCHECK(IsManagedByNormalBuckets(address));
891   uintptr_t super_page = address & kSuperPageBaseMask;
892   return SuperPageStateBitmap(super_page);
893 }
894 #endif  // BUILDFLAG(USE_STARSCAN)
895 
896 // Iterates over all slot spans in a super-page. |Callback| must return true if
897 // early return is needed.
898 template <bool thread_safe, typename Callback>
899 void IterateSlotSpans(uintptr_t super_page,
900                       bool with_quarantine,
901                       Callback callback) {
902 #if BUILDFLAG(PA_DCHECK_IS_ON)
903   PA_DCHECK(!(super_page % kSuperPageAlignment));
904   auto* extent_entry = PartitionSuperPageToExtent<thread_safe>(super_page);
905   extent_entry->root->lock_.AssertAcquired();
906 #endif
907 
908   using Page = PartitionPage<thread_safe>;
909   using SlotSpan = SlotSpanMetadata<thread_safe>;
910   auto* const first_page =
911       Page::FromAddr(SuperPagePayloadBegin(super_page, with_quarantine));
912   auto* const last_page =
913       Page::FromAddr(SuperPagePayloadEnd(super_page) - PartitionPageSize());
914   Page* page;
915   SlotSpan* slot_span;
916   for (page = first_page; page <= last_page;) {
917     PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
918     if (!page->is_valid) {
919       if (page->has_valid_span_after_this) {
920         // The page doesn't represent a valid slot span, but there is another
921         // one somewhere after this. Keep iterating to find it.
922         ++page;
923         continue;
924       }
925       // There are currently no valid spans from here on. No need to iterate
926       // the rest of the super page.
927       break;
928     }
929     slot_span = &page->slot_span_metadata;
930     if (callback(slot_span)) {
931       return;
932     }
933     page += slot_span->bucket->get_pages_per_slot_span();
934   }
935   // Each super page must have at least one valid slot span.
936   PA_DCHECK(page > first_page);
937   // Just a quick check that the search ended at a valid slot span and there
938   // was no unnecessary iteration over gaps afterwards.
939   PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
940                         slot_span->bucket->get_pages_per_slot_span());
941 }
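
// Editor's note: a minimal usage sketch, not part of the original header,
// assuming the root's lock_ is already held as the DCHECK above requires:
//
//   size_t num_slot_spans = 0;
//   IterateSlotSpans<ThreadSafe>(
//       super_page, /*with_quarantine=*/false,
//       [&](SlotSpanMetadata<ThreadSafe>*) {
//         ++num_slot_spans;
//         return false;  // Keep iterating over the whole super page.
//       });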
942 
943 }  // namespace partition_alloc::internal
944 
945 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
946