// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_

#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_dcheck_helper.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_page_constants.h"
#include "partition_alloc/partition_superpage_extent_entry.h"
#include "partition_alloc/reservation_offset_table.h"

#if BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/state_bitmap.h"
#endif

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
#include "partition_alloc/partition_ref_count.h"
#endif

#if BUILDFLAG(PA_DCHECK_IS_ON)
#include "partition_alloc/tagging.h"
#endif

namespace partition_alloc::internal {

#if BUILDFLAG(USE_STARSCAN)
using AllocationStateMap =
    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
#endif

// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active slot span has available free slots, as well as allocated ones.
// A full slot span has no free slots. An empty slot span has no allocated
// slots, and a decommitted slot span is an empty one that had its backing
// memory released back to the system.
//
// There are three linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list holds mostly empty slot
// spans, but may briefly hold decommitted ones too. The "decommitted" list
// holds only decommitted slot spans.
//
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
//   return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
//   the empty list or it _may_ leave it on the active list until a future
//   list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
//   If it does this, full, empty and decommitted slot spans encountered will be
//   booted out of the active list. If there are no suitable active slot spans
//   found, an empty or decommitted slot span (if one exists) will be pulled
//   from the empty/decommitted list onto the active list.
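//
// For orientation only (not normative; it simply restates the is_*() getters
// defined later in this header), the states roughly map onto the fields below
// as follows:
//   full        : num_allocated_slots == slots per span
//   active      : 0 < num_allocated_slots < slots per span, and free or
//                 unprovisioned slots remain
//   empty       : num_allocated_slots == 0, freelist non-empty
//   decommitted : num_allocated_slots == 0, freelist empty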
#pragma pack(push, 1)
struct SlotSpanMetadata {
 private:
  union {
    EncodedNextFreelistEntry* freelist_head = nullptr;
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
    PoolOffsetFreelistEntry* pool_offset_freelist_head = nullptr;
#endif
  };

 public:
  // TODO(lizeb): Make as many fields as possible private or const, to
  // encapsulate things more clearly.
  SlotSpanMetadata* next_slot_span = nullptr;
  PartitionBucket* const bucket = nullptr;

  // CHECK()ed in AllocNewSlotSpan().
  // The maximum number of bits needed to cover all currently supported OSes.
  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
  static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");

  // |marked_full| isn't equivalent to being full. Slot span is marked as full
  // iff it isn't on the active slot span list (or any other list).
  uint32_t marked_full : 1;
  // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
  // be further differentiated by checking existence of the freelist.
  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;

 private:
  const uint32_t can_store_raw_size_ : 1;
  uint32_t freelist_is_sorted_ : 1;
  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
  // If |in_empty_cache_|==0, |empty_cache_index_| is undefined and mustn't be
  // used.
  uint16_t in_empty_cache_ : 1;
  uint16_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
  uint16_t unused2_ : (16 - 1 - kEmptyCacheIndexBits);
  // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
  // in PartitionPage which has 2B worth of fields and must fit in 32B.
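  //
  // As a sanity check of the budget above (not a new constraint): the 32-bit
  // block packs 1 (marked_full) + 2 * kMaxSlotsPerSlotSpanBits (the two slot
  // counts) + 1 (can_store_raw_size_) + 1 (freelist_is_sorted_) + unused1_
  // bits, and the 16-bit block packs 1 (in_empty_cache_) +
  // kEmptyCacheIndexBits (empty_cache_index_) + unused2_ bits, i.e. 4 + 2 = 6
  // bytes in total.
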
 public:
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  explicit SlotSpanMetadata(PartitionBucket* bucket);

  inline SlotSpanMetadata(const SlotSpanMetadata&);

  // Public API
  // Note the matching Alloc() functions are in PartitionPage.
  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
      size_t number_of_freed);
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* PopForAlloc(size_t size);
  PA_ALWAYS_INLINE void Free(uintptr_t ptr, PartitionRoot* root)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* PopPoolOffsetFreelistForAlloc(
      size_t size);
  PA_ALWAYS_INLINE void FreeForPoolOffsetFreelist(uintptr_t ptr,
                                                  PartitionRoot* root)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
#endif

  // Appends the passed freelist to the slot-span's freelist. Please note that
  // the function doesn't increment the tags of the passed freelist entries,
  // since FreeInline() did it already.
  PA_ALWAYS_INLINE void AppendFreeList(EncodedNextFreelistEntry* head,
                                       EncodedNextFreelistEntry* tail,
                                       size_t number_of_freed,
                                       PartitionRoot* root)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
  PA_ALWAYS_INLINE void AppendPoolOffsetFreeList(PoolOffsetFreelistEntry* head,
                                                 PoolOffsetFreelistEntry* tail,
                                                 size_t number_of_freed,
                                                 PartitionRoot* root)
      PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

  void Decommit(PartitionRoot* root);
  void DecommitIfPossible(PartitionRoot* root);

  // Sorts the freelist in ascending address order.
  void SortFreelist();
  // Inserts the slot span into the empty ring, making space for the new slot
  // span, and potentially shrinking the ring.
  void RegisterEmpty();

  // Pointer/address manipulation functions. These must be static as the input
  // |slot_span| pointer may be the result of an offset calculation and
  // therefore cannot be trusted. The objective of these functions is to
  // sanitize this input.
  PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
      const SlotSpanMetadata* slot_span);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
      uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);

  PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* ToSuperPageExtent() const;

  // Checks if it is feasible to store raw_size.
  PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
  // The caller is responsible for ensuring that raw_size can be stored before
  // calling Set/GetRawSize.
  PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
  PA_ALWAYS_INLINE size_t GetRawSize() const;

  PA_ALWAYS_INLINE EncodedNextFreelistEntry* get_freelist_head() const {
    return freelist_head;
  }
  PA_ALWAYS_INLINE void SetFreelistHead(EncodedNextFreelistEntry* new_head);

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
  PA_ALWAYS_INLINE PoolOffsetFreelistEntry* get_pool_offset_freelist_head()
      const {
    return pool_offset_freelist_head;
  }
  PA_ALWAYS_INLINE void SetPoolOffsetFreelistHead(
      PoolOffsetFreelistEntry* new_head);
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

  // Returns size of the region used within a slot. The used region comprises
  // actual allocated data, extras, and possibly empty space in the middle.
  PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
    // The returned size can be:
    // - The slot size for small buckets.
    // - Exact size needed to satisfy allocation (incl. extras), for large
    //   buckets and direct-mapped allocations (see also the comment in
    //   CanStoreRawSize() for more info).
    if (PA_LIKELY(!CanStoreRawSize())) {
      return bucket->slot_size;
    }
    return GetRawSize();
  }

  // This includes padding due to rounding done at allocation; we don't know the
  // requested size at deallocation, so we use this in both places.
  PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
    // This could be more precise for allocations where CanStoreRawSize()
    // returns true (large allocations). However this is called for *every*
    // allocation, so we don't want an extra branch there.
    return bucket->slot_size;
  }

  // Returns the total size of the slots that are currently provisioned.
  PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
    return provisioned_size;
  }

  // Returns the number of entries in the freelist.
  size_t GetFreelistLength() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    return num_provisioned_slots - num_allocated_slots;
  }

  PA_ALWAYS_INLINE void Reset();

  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  static const SlotSpanMetadata* get_sentinel_slot_span();
  // The sentinel is not supposed to be modified and hence we mark it as const
  // under the hood. However, we often store it together with mutable metadata
  // objects and need a non-const pointer.
  // You can use this function for this case, but you need to ensure that the
  // returned object will not be written to.
  static SlotSpanMetadata* get_sentinel_slot_span_non_const();

  // Slot span state getters.
  PA_ALWAYS_INLINE bool is_active() const;
  PA_ALWAYS_INLINE bool is_full() const;
  PA_ALWAYS_INLINE bool is_empty() const;
  PA_ALWAYS_INLINE bool is_decommitted() const;
  PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
  PA_ALWAYS_INLINE bool freelist_is_sorted() const {
    return freelist_is_sorted_;
  }
  PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }

 private:
  // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
  // span in the active list. We could use nullptr, but in that case we need to
  // add a null-check branch to the hot allocation path. We want to avoid that.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
  static const SlotSpanMetadata sentinel_slot_span_;
  // For the sentinel.
  inline constexpr SlotSpanMetadata() noexcept;
};
#pragma pack(pop)
static_assert(sizeof(SlotSpanMetadata) <= kPageMetadataSize,
              "SlotSpanMetadata must fit into a Page Metadata slot.");
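
// A minimal usage sketch (illustration only): given a pointer previously
// returned by PartitionAlloc, the metadata can be reached and queried like
// this, assuming the relevant root lock is held where the API requires it.
//
//   void* object = ...;  // hypothetical, previously allocated object
//   SlotSpanMetadata* slot_span = SlotSpanMetadata::FromObject(object);
//   size_t used = slot_span->GetUtilizedSlotSize();
//   bool active = slot_span->is_active();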

inline constexpr SlotSpanMetadata::SlotSpanMetadata() noexcept
    : marked_full(0),
      num_allocated_slots(0),
      num_unprovisioned_slots(0),
      can_store_raw_size_(false),
      freelist_is_sorted_(true),
      unused1_(0),
      in_empty_cache_(0),
      empty_cache_index_(0),
      unused2_(0) {
  (void)unused1_;
  (void)unused2_;
}

inline SlotSpanMetadata::SlotSpanMetadata(const SlotSpanMetadata&) = default;

// Metadata of a non-first partition page in a slot span.
struct SubsequentPageMetadata {
  // Raw size is the size needed to satisfy the allocation (requested size +
  // extras). If available, it can be used to report better statistics or to
  // bring protective cookie closer to the allocated memory.
  //
  // It can be used only if:
  // - there is no more than one slot in the slot span (otherwise we wouldn't
  //   know which slot the raw size applies to)
  // - there is more than one partition page in the slot span (the metadata of
  //   the first one is used to store slot information, but the second one is
  //   available for extra information)
  size_t raw_size;
};

// Each partition page has metadata associated with it. The metadata of the
// first page of a slot span describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
// "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
#pragma pack(push, 1)
struct PartitionPage {
  union {
    SlotSpanMetadata slot_span_metadata;

    SubsequentPageMetadata subsequent_page_metadata;

    // sizeof(PartitionPage) must always be:
    // - a power of 2 (for fast modulo operations)
    // - below kPageMetadataSize
    //
    // This makes sure that this is respected no matter the architecture.
    char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
  };

  // The first PartitionPage of the slot span holds its metadata. This offset
  // tells how many pages in from that first page we are.
  // For direct maps, the first page metadata (that isn't super page extent
  // entry) uses this field to tell how many pages to the right the direct map
  // metadata starts.
  //
  // 6 bits is enough to represent all possible offsets, given that the smallest
  // partition page is 16kiB and the offset won't exceed 1MiB.
  static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
  static constexpr uint16_t kMaxSlotSpanMetadataOffset =
      (1 << kMaxSlotSpanMetadataBits) - 1;
  uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;

  // |is_valid| tells whether the page is part of a slot span. If |false|,
  // |has_valid_span_after_this| tells whether it's an unused region in between
  // slot spans within the super page.
  // Note, |is_valid| has been added for clarity, but if we ever need to save
  // this bit, it can be inferred from:
  //   |!slot_span_metadata_offset && slot_span_metadata->bucket|.
  bool is_valid : 1;
  bool has_valid_span_after_this : 1;
  uint8_t unused;

  PA_ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
};
#pragma pack(pop)
static_assert(sizeof(PartitionPage) == kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");

// Certain functions rely on PartitionPage being either SlotSpanMetadata or
// SubsequentPageMetadata, and therefore on the ability to freely cast between
// the two.
// TODO(https://crbug.com/1500662) Stop ignoring the -Winvalid-offsetof warning.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#endif
static_assert(offsetof(PartitionPage, slot_span_metadata) == 0, "");
static_assert(offsetof(PartitionPage, subsequent_page_metadata) == 0, "");
#if defined(__clang__)
#pragma clang diagnostic pop
#endif

PA_ALWAYS_INLINE PartitionPage* PartitionSuperPageToMetadataArea(
    uintptr_t super_page) {
  // This can't be just any super page, but it has to be the first super page of
  // the reservation, as we assume here that the metadata is near its beginning.
  PA_DCHECK(IsReservationStart(super_page));
  PA_DCHECK(!(super_page & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<PartitionPage*>(super_page + SystemPageSize());
}
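
// For orientation (a rough sketch, not authoritative; exact sizes and the
// presence of the bitmaps depend on build flags and the page-size
// configuration), the super-page layout assumed by the helpers in this file
// is roughly:
//
//   | guard page | metadata area (this PartitionPage array) | guard pages |
//   | free-slot / state bitmaps (normal buckets only, optional)           |
//   | payload: slot spans ...                       | trailing guard page |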

PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
    const PartitionPage* page) {
  return &(page + 1)->subsequent_page_metadata;
}

PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
    PartitionPage* page) {
  return &(page + 1)->subsequent_page_metadata;
}

PA_ALWAYS_INLINE PartitionSuperPageExtentEntry* PartitionSuperPageToExtent(
    uintptr_t super_page) {
  // The very first entry of the metadata is the super page extent entry.
  return reinterpret_cast<PartitionSuperPageExtentEntry*>(
      PartitionSuperPageToMetadataArea(super_page));
}

#if BUILDFLAG(USE_STARSCAN)

// Size that should be reserved for state bitmap (if present) inside a super
// page. Elements of a super page are partition-page-aligned, hence the returned
// size is a multiple of partition page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
}

// Size that should be committed for state bitmap (if present) inside a super
// page. It is a multiple of system page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
CommittedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
}

// Returns the address/pointer to the state bitmap in the super page. It's the
// caller's responsibility to ensure that the bitmaps even exist.
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
                                               : 0);
}

PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
    uintptr_t super_page) {
  return reinterpret_cast<AllocationStateMap*>(
      SuperPageStateBitmapAddr(super_page));
}

#else  // BUILDFLAG(USE_STARSCAN)

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedStateBitmapSize() {
  return 0ull;
}

#endif  // BUILDFLAG(USE_STARSCAN)

PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
                            bool with_quarantine) {
  return PartitionPageSize() +
         (is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
         (with_quarantine ? ReservedStateBitmapSize() : 0);
}
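
// As a concrete illustration (assuming the common configuration where
// PartitionPageSize() is 16 KiB): a normal-bucket super page without
// quarantine starts its payload at 16 KiB + ReservedFreeSlotBitmapSize(),
// whereas a super page that isn't managed by normal buckets (e.g. a direct
// map) starts its payload at exactly 16 KiB.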

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
                                                 bool with_quarantine) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page +
         SuperPagePayloadStartOffset(IsManagedByNormalBuckets(super_page),
                                     with_quarantine);
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEndOffset() {
  return kSuperPageSize - PartitionPageSize();
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + SuperPagePayloadEndOffset();
}

PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
                                             bool with_quarantine) {
  return SuperPagePayloadEnd(super_page) -
         SuperPagePayloadBegin(super_page, with_quarantine);
}

PA_ALWAYS_INLINE PartitionSuperPageExtentEntry*
SlotSpanMetadata::ToSuperPageExtent() const {
  uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
  return PartitionSuperPageToExtent(super_page);
}

// Returns whether the pointer lies within the super page's payload area (i.e.
// area devoted to slot spans). It doesn't check whether it's within a valid
// slot span. It merely ensures it doesn't fall in a meta-data region that would
// surely never contain user data.
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
                                               bool with_quarantine) {
  // Quarantine can only be enabled for normal buckets in the current code.
  PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
  uintptr_t payload_end = SuperPagePayloadEnd(super_page);
  return address >= payload_start && address < payload_end;
}

// Converts from an address inside a super page into a pointer to the
// PartitionPage object (within the super page's metadata) that describes the
// partition page where |address| is located. |address| doesn't have to be
// located within a valid (i.e. allocated) slot span, but must be within the
// super page's payload area (i.e. area devoted to slot spans).
//
// While it is generally valid for |address| to be in the middle of an
// allocation, care has to be taken with direct maps that span multiple super
// pages. This function's behavior is undefined if |address| lies in a
// subsequent super page.
PA_ALWAYS_INLINE PartitionPage* PartitionPage::FromAddr(uintptr_t address) {
  uintptr_t super_page = address & kSuperPageBaseMask;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(IsReservationStart(super_page));
  DCheckIsWithInSuperPagePayload(address);
#endif

  uintptr_t partition_page_index =
      (address & kSuperPageOffsetMask) >> PartitionPageShift();
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages. This repeats part of the payload PA_DCHECK above, which also checks
  // for other exclusions.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  return PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
}

// Converts from a pointer to the SlotSpanMetadata object (within a super
// page's metadata) into a pointer to the beginning of the slot span. This
// works on direct maps too.
PA_ALWAYS_INLINE uintptr_t
SlotSpanMetadata::ToSlotSpanStart(const SlotSpanMetadata* slot_span) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

  // A valid |slot_span| must be past the first guard system page and within
  // the following metadata region.
  PA_DCHECK(super_page_offset > SystemPageSize());
  // Must be less than total metadata region.
  PA_DCHECK(super_page_offset <
            SystemPageSize() +
                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
  uintptr_t partition_page_index =
      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
  return super_page_base + (partition_page_index << PartitionPageShift());
}
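
// A worked example of the mapping above (illustration only): a metadata
// object at super_page + SystemPageSize() + k * kPageMetadataSize yields
// super_page + k * PartitionPageSize(), i.e. the k-th metadata entry maps back
// to the k-th partition page of the super page.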

// Converts an address inside a slot span into a pointer to the SlotSpanMetadata
// object (within the super page's metadata) that describes the slot span
// containing that slot.
//
// CAUTION! For direct-mapped allocation, |address| has to be within the first
// partition page.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromAddr(
    uintptr_t address) {
  auto* page = PartitionPage::FromAddr(address);
  PA_DCHECK(page->is_valid);
  // Partition pages in the same slot span share the same SlotSpanMetadata
  // object (located in the first PartitionPage object of that span). Adjust
  // for that.
  page -= page->slot_span_metadata_offset;
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
  DCheckIsValidSlotSpan(slot_span);
  // For direct map, if |address| doesn't point within the first partition page,
  // |slot_span_metadata_offset| will be 0, |page| won't get shifted, leaving
  // |slot_size| at 0.
  PA_DCHECK(slot_span->bucket->slot_size);
  return slot_span;
}

// Like |FromAddr|, but asserts that |slot_start| indeed points to the
// beginning of a slot. It doesn't check if the slot is actually allocated.
//
// This works on direct maps too.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromSlotStart(
    uintptr_t slot_start) {
  auto* slot_span = FromAddr(slot_start);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the offset from the slot span start is a multiple of the slot
  // size, i.e. that |slot_start| is indeed a slot boundary.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

// Like |FromAddr|, but asserts that |object| indeed points to the beginning of
// an object. It doesn't check if the object is actually allocated.
//
// This works on direct maps too.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObject(void* object) {
  uintptr_t object_addr = ObjectPtr2Addr(object);
  auto* slot_span = FromAddr(object_addr);
  DCheckIsValidObjectAddress(slot_span, object_addr);
  return slot_span;
}

// Like |FromAddr|, but asserts that |address| indeed points within an object.
// It doesn't check if the object is actually allocated.
//
// CAUTION! For direct-mapped allocation, |address| has to be within the first
// partition page.
PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerAddr(
    uintptr_t address) {
  auto* slot_span = FromAddr(address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the address is within the expected object boundaries.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  uintptr_t shift_from_slot_start =
      (address - slot_span_start) % slot_span->bucket->slot_size;
  DCheckIsValidShiftFromSlotStart(slot_span, shift_from_slot_start);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

PA_ALWAYS_INLINE SlotSpanMetadata* SlotSpanMetadata::FromObjectInnerPtr(
    void* ptr) {
  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
}

PA_ALWAYS_INLINE void SlotSpanMetadata::SetRawSize(size_t raw_size) {
  PA_DCHECK(CanStoreRawSize());
  auto* subsequent_page_metadata =
      GetSubsequentPageMetadata(reinterpret_cast<PartitionPage*>(this));
  subsequent_page_metadata->raw_size = raw_size;
}

PA_ALWAYS_INLINE size_t SlotSpanMetadata::GetRawSize() const {
  PA_DCHECK(CanStoreRawSize());
  const auto* subsequent_page_metadata =
      GetSubsequentPageMetadata(reinterpret_cast<const PartitionPage*>(this));
  return subsequent_page_metadata->raw_size;
}

PA_ALWAYS_INLINE void SlotSpanMetadata::SetFreelistHead(
    EncodedNextFreelistEntry* new_head) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
  // as well.
  uintptr_t new_head_untagged = UntagPtr(new_head);
  PA_DCHECK(!new_head ||
            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
                (new_head_untagged & kSuperPageBaseMask));
#endif
  freelist_head = new_head;
  // Inserted something new in the freelist, assume that it is not sorted
  // anymore.
  freelist_is_sorted_ = false;
}

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
PA_ALWAYS_INLINE void SlotSpanMetadata::SetPoolOffsetFreelistHead(
    PoolOffsetFreelistEntry* new_head) {
  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
  // before comparing super page bases.
  PA_DCHECK(!new_head ||
            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
                (UntagPtr(new_head) & kSuperPageBaseMask));
  pool_offset_freelist_head = new_head;
  // Inserted something new in the freelist, assume that it is not sorted
  // anymore.
  freelist_is_sorted_ = false;
}
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

PA_ALWAYS_INLINE EncodedNextFreelistEntry* SlotSpanMetadata::PopForAlloc(
    size_t size) {
  // Not using bucket->slot_size directly as the compiler doesn't know that
  // |bucket->slot_size| is the same as |size|.
  PA_DCHECK(size == bucket->slot_size);
  EncodedNextFreelistEntry* result = freelist_head;
  // Not setting freelist_is_sorted_ to false since this doesn't destroy
  // ordering.
  freelist_head = freelist_head->GetNext(size);
  num_allocated_slots++;
  return result;
}

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
PA_ALWAYS_INLINE PoolOffsetFreelistEntry*
SlotSpanMetadata::PopPoolOffsetFreelistForAlloc(size_t size) {
  PA_DCHECK(size == bucket->slot_size);
  PoolOffsetFreelistEntry* result = pool_offset_freelist_head;
  pool_offset_freelist_head = pool_offset_freelist_head->GetNext(size);
  num_allocated_slots++;
  return result;
}
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

PA_ALWAYS_INLINE void SlotSpanMetadata::Free(uintptr_t slot_start,
                                             PartitionRoot* root)
    // PartitionRootLock() is not defined inside partition_page.h, but
    // static analysis doesn't require the implementation.
    PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
  DCheckRootLockIsAcquired(root);

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
  if (root->uses_pool_offset_freelists()) {
    return FreeForPoolOffsetFreelist(slot_start, root);
  }
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

  auto* entry = static_cast<internal::EncodedNextFreelistEntry*>(
      SlotStartAddr2Ptr(slot_start));
  // Catches an immediate double free.
  PA_CHECK(entry != freelist_head);
  // Look for double free one level deeper in debug.
  PA_DCHECK(!freelist_head ||
            entry != freelist_head->GetNext(bucket->slot_size));
  entry->SetNext(freelist_head);
  SetFreelistHead(entry);
  // A best effort double-free check. Works only on empty slot spans.
  PA_CHECK(num_allocated_slots);
  --num_allocated_slots;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(1);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
PA_ALWAYS_INLINE void SlotSpanMetadata::FreeForPoolOffsetFreelist(
    uintptr_t slot_start,
    PartitionRoot* root) PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
  DCheckRootLockIsAcquired(root);
  auto* entry = static_cast<internal::PoolOffsetFreelistEntry*>(
      SlotStartAddr2Ptr(slot_start));
  // Catches an immediate double free.
  PA_CHECK(entry != pool_offset_freelist_head);
  // Look for double free one level deeper in debug.
  PA_DCHECK(!pool_offset_freelist_head ||
            entry != pool_offset_freelist_head->GetNext(bucket->slot_size));
  entry->SetNext(pool_offset_freelist_head);
  SetPoolOffsetFreelistHead(entry);
  // A best effort double-free check. Works only on empty slot spans.
  PA_CHECK(num_allocated_slots);
  --num_allocated_slots;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(1);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

PA_ALWAYS_INLINE void SlotSpanMetadata::AppendFreeList(
    EncodedNextFreelistEntry* head,
    EncodedNextFreelistEntry* tail,
    size_t number_of_freed,
    PartitionRoot* root) PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  DCheckRootLockIsAcquired(root);
  PA_DCHECK(!tail->GetNext(bucket->slot_size));
  PA_DCHECK(number_of_freed);
  PA_DCHECK(num_allocated_slots);
  if (CanStoreRawSize()) {
    PA_DCHECK(number_of_freed == 1);
  }
  {
    size_t number_of_entries = 0;
    for (auto* entry = head; entry;
         entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
      uintptr_t untagged_entry = UntagPtr(entry);
      // Check that all entries belong to this slot span.
      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
      PA_DCHECK(untagged_entry <
                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
    }
    PA_DCHECK(number_of_entries == number_of_freed);
  }
#endif

  tail->SetNext(freelist_head);
  SetFreelistHead(head);
  PA_DCHECK(num_allocated_slots >= number_of_freed);
  num_allocated_slots -= number_of_freed;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(number_of_freed);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
PA_ALWAYS_INLINE void SlotSpanMetadata::AppendPoolOffsetFreeList(
    PoolOffsetFreelistEntry* head,
    PoolOffsetFreelistEntry* tail,
    size_t number_of_freed,
    PartitionRoot* root) PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root)) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  DCheckRootLockIsAcquired(root);
  PA_DCHECK(!tail->GetNext(bucket->slot_size));
  PA_DCHECK(number_of_freed);
  PA_DCHECK(num_allocated_slots);
  if (CanStoreRawSize()) {
    PA_DCHECK(number_of_freed == 1);
  }
  {
    size_t number_of_entries = 0;
    for (auto* entry = head; entry;
         entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
      uintptr_t untagged_entry = UntagPtr(entry);
      // Check that all entries belong to this slot span.
      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
      PA_DCHECK(untagged_entry <
                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
    }
    PA_DCHECK(number_of_entries == number_of_freed);
  }
#endif

  tail->SetNext(pool_offset_freelist_head);
  SetPoolOffsetFreelistHead(head);
  PA_DCHECK(num_allocated_slots >= number_of_freed);
  num_allocated_slots -= number_of_freed;
  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(number_of_freed);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}
#endif  // BUILDFLAG(USE_FREELIST_POOL_OFFSETS)

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_active() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret =
      (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_full() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
  if (ret) {
    PA_DCHECK(!freelist_head);
    PA_DCHECK(!num_unprovisioned_slots);
    // May or may not be marked full, so don't check for that.
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_empty() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
  }
  return ret;
}

PA_ALWAYS_INLINE bool SlotSpanMetadata::is_decommitted() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && !freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(!num_unprovisioned_slots);
    PA_DCHECK(!in_empty_cache_);
  }
  return ret;
}

PA_ALWAYS_INLINE void SlotSpanMetadata::Reset() {
  PA_DCHECK(is_decommitted());

  size_t num_slots_per_span = bucket->get_slots_per_span();
  PA_DCHECK(num_slots_per_span <= kMaxSlotsPerSlotSpan);
  num_unprovisioned_slots = static_cast<uint32_t>(num_slots_per_span);
  PA_DCHECK(num_unprovisioned_slots);

  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();

  next_slot_span = nullptr;
}

#if BUILDFLAG(USE_STARSCAN)
// Returns the state bitmap from an address within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
  PA_DCHECK(IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  return SuperPageStateBitmap(super_page);
}
#endif  // BUILDFLAG(USE_STARSCAN)

// Iterates over all slot spans in a super-page. |Callback| must return true if
// early return is needed.
template <typename Callback>
void IterateSlotSpans(uintptr_t super_page,
                      bool with_quarantine,
                      Callback callback) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  auto* extent_entry = PartitionSuperPageToExtent(super_page);
  DCheckRootLockIsAcquired(extent_entry->root);
#endif

  using Page = PartitionPage;
  using SlotSpan = SlotSpanMetadata;
  auto* const first_page =
      Page::FromAddr(SuperPagePayloadBegin(super_page, with_quarantine));
  auto* const last_page =
      Page::FromAddr(SuperPagePayloadEnd(super_page) - PartitionPageSize());
  Page* page = nullptr;
  SlotSpan* slot_span = nullptr;
  for (page = first_page; page <= last_page;) {
    PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
    if (!page->is_valid) {
      if (page->has_valid_span_after_this) {
        // The page doesn't represent a valid slot span, but there is another
        // one somewhere after this. Keep iterating to find it.
        ++page;
        continue;
      }
      // There are currently no valid spans from here on. No need to iterate
      // the rest of the super page.
      break;
    }
    slot_span = &page->slot_span_metadata;
    if (callback(slot_span)) {
      return;
    }
    page += slot_span->bucket->get_pages_per_slot_span();
  }
  // Each super page must have at least one valid slot span.
  PA_DCHECK(page > first_page);
  // Just a quick check that the search ended at a valid slot span and there
  // was no unnecessary iteration over gaps afterwards.
  PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
                        slot_span->bucket->get_pages_per_slot_span());
}
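
// A hypothetical caller (illustration only; assumes the root lock is already
// held, as DCheck'ed above):
//
//   size_t num_active = 0;
//   IterateSlotSpans(super_page, /*with_quarantine=*/false,
//                    [&](SlotSpanMetadata* slot_span) {
//                      if (slot_span->is_active())
//                        ++num_active;
//                      return false;  // Keep iterating; returning true stops.
//                    });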

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_PAGE_H_