• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
7 
8 #include <cstddef>
9 #include <cstdint>
10 
11 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
12 #include "partition_alloc/partition_alloc_base/component_export.h"
13 #include "partition_alloc/partition_alloc_base/thread_annotations.h"
14 #include "partition_alloc/partition_alloc_check.h"
15 #include "partition_alloc/partition_alloc_constants.h"
16 #include "partition_alloc/partition_alloc_forward.h"
17 #include "partition_alloc/partition_page_constants.h"
18 
19 namespace partition_alloc::internal {
20 
// Width, in bits, of PartitionBucket::num_system_pages_per_slot_span below.
// 8 bits allow a slot span of up to 255 system pages.
constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;

// Computes the number of system pages a slot span should span for slots of
// |slot_size| bytes. Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
                                      bool prefer_smaller_slot_spans);

// Strict-weak-ordering comparator for slot spans, used when sorting them
// (see PartitionBucket::SortActiveSlotSpans). Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool CompareSlotSpans(SlotSpanMetadata* a, SlotSpanMetadata* b);
31 
32 struct PartitionBucket {
33   // Accessed most in hot path => goes first. Only nullptr for invalid buckets,
34   // may be pointing to the sentinel.
35   SlotSpanMetadata* active_slot_spans_head;
36 
37   SlotSpanMetadata* empty_slot_spans_head;
38   SlotSpanMetadata* decommitted_slot_spans_head;
39   uint32_t slot_size;
40   uint32_t num_system_pages_per_slot_span
41       : kPartitionNumSystemPagesPerSlotSpanBits;
42   uint32_t num_full_slot_spans : 24;
43 
44   // `slot_size_reciprocal` is used to improve the performance of
45   // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
46   // chosen to provide the desired accuracy. As a result, we can replace a slow
47   // integer division (or modulo) operation with a pair of multiplication and a
48   // bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
49   uint64_t slot_size_reciprocal;
50 
51   // This is `M` from the formula above. For accurate results, both `value` and
52   // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
53   // than `2 ** (M / 2)`. On the other hand, the result of the expression
54   // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
55   static constexpr uint64_t kReciprocalShift = 42;
56   static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
57   static_assert(
58       kMaxBucketed < (1 << (kReciprocalShift / 2)),
59       "GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
60       "large.");
61 
62   static constexpr size_t kMaxSlotSpansToSort = 200;
63 
64   // Public API.
65   PA_COMPONENT_EXPORT(PARTITION_ALLOC) void Init(uint32_t new_slot_size);
66 
67   // Sets |is_already_zeroed| to true if the allocation was satisfied by
68   // requesting (a) new page(s) from the operating system, or false otherwise.
69   // This enables an optimization for when callers use
70   // |AllocFlags::kZeroFill|: there is no need to call memset on fresh
71   // pages; the OS has already zeroed them. (See
72   // |PartitionRoot::AllocFromBucket|.)
73   //
74   // Note the matching Free() functions are in SlotSpanMetadata.
75   PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
76       SlowPathAlloc(PartitionRoot* root,
77                     AllocFlags flags,
78                     size_t raw_size,
79                     size_t slot_span_alignment,
80                     bool* is_already_zeroed)
81           PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
82 
CanStoreRawSizePartitionBucket83   PA_ALWAYS_INLINE bool CanStoreRawSize() const {
84     // For direct-map as well as single-slot slot spans (recognized by checking
85     // against |MaxRegularSlotSpanSize()|), we have some spare metadata space in
86     // subsequent PartitionPage to store the raw size. It isn't only metadata
87     // space though, slot spans that have more than one slot can't have raw size
88     // stored, because we wouldn't know which slot it applies to.
89     if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
90       return false;
91     }
92 
93     PA_DCHECK((slot_size % SystemPageSize()) == 0);
94     PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
95 
96     return true;
97   }
98 
99   // Some buckets are pseudo-buckets, which are disabled because they would
100   // otherwise not fulfill alignment constraints.
is_validPartitionBucket101   PA_ALWAYS_INLINE bool is_valid() const {
102     return active_slot_spans_head != nullptr;
103   }
is_direct_mappedPartitionBucket104   PA_ALWAYS_INLINE bool is_direct_mapped() const {
105     return !num_system_pages_per_slot_span;
106   }
get_bytes_per_spanPartitionBucket107   PA_ALWAYS_INLINE size_t get_bytes_per_span() const {
108     // Cannot overflow, num_system_pages_per_slot_span is a bitfield, and 255
109     // pages fit in a size_t.
110     static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
111     return static_cast<size_t>(num_system_pages_per_slot_span)
112            << SystemPageShift();
113   }
get_slots_per_spanPartitionBucket114   PA_ALWAYS_INLINE size_t get_slots_per_span() const {
115     size_t ret = GetSlotNumber(get_bytes_per_span());
116     PA_DCHECK(ret <= kMaxSlotsPerSlotSpan);
117     return ret;
118   }
119   // Returns a natural number of partition pages (calculated by
120   // ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
121   // when the bucket runs out of slots.
get_pages_per_slot_spanPartitionBucket122   PA_ALWAYS_INLINE size_t get_pages_per_slot_span() const {
123     // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
124     return (num_system_pages_per_slot_span +
125             (NumSystemPagesPerPartitionPage() - 1)) /
126            NumSystemPagesPerPartitionPage();
127   }
128 
129   // This helper function scans a bucket's active slot span list for a suitable
130   // new active slot span.  When it finds a suitable new active slot span (one
131   // that has free slots and is not empty), it is set as the new active slot
132   // span. If there is no suitable new active slot span, the current active slot
133   // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
134   // slot spans are scanned, they are tidied up according to their state. Empty
135   // slot spans are swept on to the empty list, decommitted slot spans on to the
136   // decommitted list and full slot spans are unlinked from any list.
137   //
138   // This is where the guts of the bucket maintenance is done!
139   bool SetNewActiveSlotSpan();
140 
141   // Walks the entire active slot span list, and perform regular maintenance,
142   // where empty, decommitted and full slot spans are moved to their
143   // steady-state place.
144   PA_COMPONENT_EXPORT(PARTITION_ALLOC) void MaintainActiveList();
145 
146   // Returns a slot number starting from the beginning of the slot span.
GetSlotNumberPartitionBucket147   PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
148     // See the static assertion for `kReciprocalShift` above.
149     PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
150     PA_DCHECK(slot_size <= kMaxBucketed);
151 
152     const size_t offset_in_slot =
153         ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
154     PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);
155 
156     return offset_in_slot;
157   }
158 
159   // Sort the freelists of all slot spans.
160   void SortSmallerSlotSpanFreeLists();
161   // Sort the active slot span list in ascending freelist length.
162   PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SortActiveSlotSpans();
163 
164   // We need `AllocNewSuperPageSpan` and `InitializeSlotSpan` to stay
165   // PA_ALWAYS_INLINE for speed, but we also need to use them from a separate
166   // compilation unit.
167   uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot* root,
168                                             size_t super_page_count,
169                                             AllocFlags flags)
170       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
171   void InitializeSlotSpanForGwpAsan(SlotSpanMetadata* slot_span);
172 
173  private:
174   // Allocates several consecutive super pages. Returns the address of the first
175   // super page.
176   PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(PartitionRoot* root,
177                                                    size_t super_page_count,
178                                                    AllocFlags flags)
179       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
180   // Allocates a new slot span with size |num_partition_pages| from the
181   // current extent. Metadata within this slot span will be initialized.
182   // Returns nullptr on error.
183   PA_ALWAYS_INLINE SlotSpanMetadata* AllocNewSlotSpan(
184       PartitionRoot* root,
185       AllocFlags flags,
186       size_t slot_span_alignment)
187       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
188 
189   // Allocates a new super page from the current extent, if possible. All
190   // slot-spans will be in the decommitted state. Returns the address of the
191   // super page's payload, or 0 on error.
192   PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot* root,
193                                                AllocFlags flags)
194       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
195 
196   // Each bucket allocates a slot span when it runs out of slots.
197   // A slot span's size is equal to get_pages_per_slot_span() number of
198   // partition pages. This function initializes all PartitionPage within the
199   // span to point to the first PartitionPage which holds all the metadata
200   // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
201   // as the owner of the span. It does NOT put the slots into the bucket's
202   // freelist.
203   PA_ALWAYS_INLINE void InitializeSlotSpan(SlotSpanMetadata* slot_span);
204 
205   // Initializes a super page. Returns the address of the super page's payload.
206   PA_ALWAYS_INLINE uintptr_t InitializeSuperPage(PartitionRoot* root,
207                                                  uintptr_t super_page,
208                                                  uintptr_t requested_address)
209       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
210   // Commit 1 or more pages in |slot_span|, enough to get the next slot, which
211   // is returned by this function. If more slots fit into the committed pages,
212   // they'll be added to the free list of the slot span (note that next pointers
213   // are stored inside the slots).
214   // The free list must be empty when calling this function.
215   //
216   // If |slot_span| was freshly allocated, it must have been passed through
217   // InitializeSlotSpan() first.
218   PA_ALWAYS_INLINE uintptr_t
219   ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
220                                 AllocFlags flags,
221                                 SlotSpanMetadata* slot_span)
222       PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
223 };
224 
225 }  // namespace partition_alloc::internal
226 
227 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_PARTITION_BUCKET_H_
228