// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"

namespace partition_alloc::internal {

constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;

// Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
                                      bool prefer_smaller_slot_spans);
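
// For illustration of the trade-off ComputeSystemPagesPerSlotSpan() weighs
// (assuming 4 KiB system pages; numbers are examples, not taken from the
// implementation): with slot_size = 320, a single-page slot span holds 12
// slots and wastes 256 bytes (6.25%), while a 5-page span holds exactly 64
// slots (64 * 320 == 5 * 4096) with no waste. |prefer_smaller_slot_spans|
// biases the choice towards smaller spans.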

// Visible for testing.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool CompareSlotSpans(SlotSpanMetadata<ThreadSafe>* a,
                      SlotSpanMetadata<ThreadSafe>* b);

template <bool thread_safe>
struct PartitionBucket {
  // Accessed most in the hot path, so it goes first. Only nullptr for invalid
  // buckets; may point to the sentinel.
  SlotSpanMetadata<thread_safe>* active_slot_spans_head;

  SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
  SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
  uint32_t slot_size;
  uint32_t num_system_pages_per_slot_span
      : kPartitionNumSystemPagesPerSlotSpanBits;
  uint32_t num_full_slot_spans : 24;
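  // Note: the two bitfields above pack into a single uint32_t
  // (8 + 24 == 32 bits), so a bucket can track at most 2^24 - 1 full slot
  // spans.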

  // `slot_size_reciprocal` is used to improve the performance of
  // `GetSlotNumber`. It is computed as `(1 / size) * (2 ** M)` where M is
  // chosen to provide the desired accuracy. As a result, we can replace a slow
  // integer division (or modulo) operation with a pair of multiplication and a
  // bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
  uint64_t slot_size_reciprocal;

  // This is `M` from the formula above. For accurate results, both `value` and
  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
  // than `2 ** (M / 2)`. On the other hand, the result of the expression
  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
  static constexpr uint64_t kReciprocalShift = 42;
  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
  static_assert(
      kMaxBucketed < (1 << (kReciprocalShift / 2)),
      "GetSlotNumber may produce an incorrect result when kMaxBucketed is too "
      "large.");
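
  // A concrete sketch of the trick (illustrative; Init() computes the
  // reciprocal with its own rounding): for slot_size == 48,
  //   slot_size_reciprocal == ceil(2^42 / 48) == 91625968982,
  // so `value / 48` becomes `(value * 91625968982) >> 42`, e.g.
  //   (96 * 91625968982) >> 42 == 2.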

  static constexpr size_t kMaxSlotSpansToSort = 200;

  // Public API.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void Init(uint32_t new_slot_size);

  // Sets |is_already_zeroed| to true if the allocation was satisfied by
  // requesting (a) new page(s) from the operating system, or false otherwise.
  // This enables an optimization for when callers use
  // |AllocFlags::kZeroFill|: there is no need to call memset on fresh
  // pages; the OS has already zeroed them. (See
  // |PartitionRoot::AllocFromBucket|.)
  //
  // Note the matching Free() functions are in SlotSpanMetadata.
  PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
      SlowPathAlloc(PartitionRoot<thread_safe>* root,
                    unsigned int flags,
                    size_t raw_size,
                    size_t slot_span_alignment,
                    bool* is_already_zeroed)
          PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
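
  // An illustrative caller-side sketch of the |is_already_zeroed| contract,
  // simplified from what PartitionRoot::AllocFromBucket does (not a verbatim
  // excerpt):
  //
  //   bool is_already_zeroed = false;
  //   uintptr_t slot_start = bucket->SlowPathAlloc(
  //       root, flags, raw_size, slot_span_alignment, &is_already_zeroed);
  //   if ((flags & AllocFlags::kZeroFill) && !is_already_zeroed)
  //     memset(reinterpret_cast<void*>(slot_start), 0, raw_size);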

  PA_ALWAYS_INLINE bool CanStoreRawSize() const {
    // For direct-map as well as single-slot slot spans (recognized by checking
    // against |MaxRegularSlotSpanSize()|), we have some spare metadata space
    // in the subsequent PartitionPage to store the raw size. It isn't only a
    // question of metadata space, though: slot spans with more than one slot
    // can't store a raw size, because we wouldn't know which slot it applies
    // to.
    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
      return false;
    }

    PA_DCHECK((slot_size % SystemPageSize()) == 0);
    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);

    return true;
  }

  // Some buckets are pseudo-buckets, which are disabled because they would
  // otherwise not fulfill alignment constraints.
  PA_ALWAYS_INLINE bool is_valid() const {
    return active_slot_spans_head != nullptr;
  }
  PA_ALWAYS_INLINE bool is_direct_mapped() const {
    return !num_system_pages_per_slot_span;
  }
  PA_ALWAYS_INLINE size_t get_bytes_per_span() const {
    // Cannot overflow: num_system_pages_per_slot_span is an 8-bit bitfield,
    // and 255 system pages' worth of bytes fits in a size_t.
    static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
    return static_cast<size_t>(num_system_pages_per_slot_span)
           << SystemPageShift();
  }
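  // Example: with 4 KiB system pages (SystemPageShift() == 12) and
  // num_system_pages_per_slot_span == 4, get_bytes_per_span() returns
  // 4 << 12 == 16384.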
  PA_ALWAYS_INLINE size_t get_slots_per_span() const {
    size_t ret = GetSlotNumber(get_bytes_per_span());
    PA_DCHECK(ret <= SlotSpanMetadata<thread_safe>::kMaxSlotsPerSlotSpan);
    return ret;
  }
  // Returns the whole number of partition pages (derived from
  // ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
  // when the bucket runs out of slots.
  PA_ALWAYS_INLINE size_t get_pages_per_slot_span() const {
    // Rounds up to the nearest multiple of NumSystemPagesPerPartitionPage().
    return (num_system_pages_per_slot_span +
            (NumSystemPagesPerPartitionPage() - 1)) /
           NumSystemPagesPerPartitionPage();
  }
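  // Example: with NumSystemPagesPerPartitionPage() == 4 and
  // num_system_pages_per_slot_span == 9, get_pages_per_slot_span() returns
  // (9 + 3) / 4 == 3 partition pages.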

  // This helper function scans a bucket's active slot span list for a suitable
  // new active slot span. When it finds a suitable new active slot span (one
  // that has free slots and is not empty), it is set as the new active slot
  // span. If there is no suitable new active slot span, the current active
  // slot span is set to SlotSpanMetadata::get_sentinel_slot_span(). As
  // potential slot spans are scanned, they are tidied up according to their
  // state: empty slot spans are swept onto the empty list, decommitted slot
  // spans onto the decommitted list, and full slot spans are unlinked from
  // any list.
  //
  // This is where the guts of the bucket maintenance is done!
  bool SetNewActiveSlotSpan();

  // Walks the entire active slot span list and performs regular maintenance,
  // moving empty, decommitted, and full slot spans to their steady-state
  // places.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void MaintainActiveList();

  // Returns a slot number starting from the beginning of the slot span.
  PA_ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
    // See the static assertion for `kReciprocalShift` above.
    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
    PA_DCHECK(slot_size <= kMaxBucketed);

    const size_t offset_in_slot =
        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);

    return offset_in_slot;
  }
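  // Continuing the slot_size == 48 sketch above: an object at byte offset 100
  // within the slot span lives in slot
  //   GetSlotNumber(100) == (100 * 91625968982) >> 42 == 2,
  // which matches 100 / 48 == 2.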

  // Sorts the freelists of all slot spans.
  void SortSlotSpanFreelists();
  // Sorts the active slot span list in ascending order of freelist length.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC) void SortActiveSlotSpans();

  // We need `AllocNewSuperPageSpan` and `InitializeSlotSpan` to stay
  // PA_ALWAYS_INLINE for speed, but we also need to use them from a separate
  // compilation unit.
  uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot<thread_safe>* root,
                                            size_t super_page_count,
                                            unsigned int flags)
      PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
  void InitializeSlotSpanForGwpAsan(SlotSpanMetadata<thread_safe>* slot_span);

 private:
  // Allocates several consecutive super pages. Returns the address of the
  // first super page.
  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(
      PartitionRoot<thread_safe>* root,
      size_t super_page_count,
      unsigned int flags) PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Allocates a new slot span (of get_pages_per_slot_span() partition pages)
  // from the current extent. Metadata within this slot span will be
  // initialized. Returns nullptr on error.
  PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>* AllocNewSlotSpan(
      PartitionRoot<thread_safe>* root,
      unsigned int flags,
      size_t slot_span_alignment) PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Allocates a new super page from the current extent, if possible. All
  // slot spans will be in the decommitted state. Returns the address of the
  // super page's payload, or 0 on error.
  PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot<thread_safe>* root,
                                               unsigned int flags)
      PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Each bucket allocates a slot span when it runs out of slots.
  // A slot span's size is get_pages_per_slot_span() partition pages. This
  // function initializes every PartitionPage within the span to point to the
  // first PartitionPage, which holds all the metadata for the span (in
  // PartitionPage::SlotSpanMetadata), and registers this bucket as the owner
  // of the span. It does NOT put the slots into the bucket's freelist.
  PA_ALWAYS_INLINE void InitializeSlotSpan(
      SlotSpanMetadata<thread_safe>* slot_span);

  // Initializes a super page. Returns the address of the super page's
  // payload.
  PA_ALWAYS_INLINE uintptr_t InitializeSuperPage(
      PartitionRoot<thread_safe>* root,
      uintptr_t super_page,
      uintptr_t requested_address) PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Commits one or more system pages in |slot_span|, enough to get the next
  // slot, which is returned by this function. If more slots fit into the
  // committed pages, they are added to the slot span's free list (note that
  // next pointers are stored inside the slots).
  // The free list must be empty when calling this function.
  //
  // If |slot_span| was freshly allocated, it must have been passed through
  // InitializeSlotSpan() first.
  PA_ALWAYS_INLINE uintptr_t
  ProvisionMoreSlotsAndAllocOne(PartitionRoot<thread_safe>* root,
                                SlotSpanMetadata<thread_safe>* slot_span)
      PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
};

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_