// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

#include <string.h>

#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
#include "third_party/base/allocator/partition_allocator/partition_cookie.h"
#include "third_party/base/allocator/partition_allocator/partition_freelist_entry.h"
#include "third_party/base/allocator/partition_allocator/random.h"

namespace pdfium {
namespace base {
namespace internal {

struct PartitionRootBase;

// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no allocated slots, and a decommitted page is an empty page
// that had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
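//
// In terms of the metadata fields below, the states are (a sketch; the
// is_*() accessors defined further down are authoritative):
//   Active:      num_allocated_slots > 0, and a free slot is available
//                (freelist_head != null or num_unprovisioned_slots > 0).
//   Full:        num_allocated_slots == slots per span; null freelist.
//   Empty:       num_allocated_slots == 0, freelist_head != null.
//   Decommitted: num_allocated_slots == 0, freelist_head == null.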
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// empty list or it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfill the request.
// If it does this, full, empty and decommitted pages encountered will be
// booted out of the active list. If there are no suitable active pages found,
// an empty or decommitted page (if one exists) will be pulled from the empty
// list onto the active list.
//
// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
// similar. If so, all uses of the term "page" in comments, member variables,
// local variables, and documentation that refer to this concept should be
// updated.
struct PartitionPage {
  PartitionFreelistEntry* freelist_head;
  PartitionPage* next_page;
  PartitionBucket* bucket;
  // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
  int16_t num_allocated_slots;
  uint16_t num_unprovisioned_slots;
  uint16_t page_offset;
  int16_t empty_cache_index;  // -1 if not in the empty cache.

  // Public API

  // Note the matching Alloc() slow path, SlowPathAlloc(), lives in
  // PartitionBucket.
  BASE_EXPORT NOINLINE void FreeSlowPath();
  ALWAYS_INLINE void Free(void* ptr);

  void Decommit(PartitionRootBase* root);
  void DecommitIfPossible(PartitionRootBase* root);

  // Pointer manipulation functions. These must be static as the input |page|
  // pointer may be the result of an offset calculation and therefore cannot
  // be trusted. The objective of these functions is to sanitize this input.
  ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
  ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
  ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);

  ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
  ALWAYS_INLINE size_t* get_raw_size_ptr() {
    return const_cast<size_t*>(
        const_cast<const PartitionPage*>(this)->get_raw_size_ptr());
  }

  ALWAYS_INLINE size_t get_raw_size() const;
  ALWAYS_INLINE void set_raw_size(size_t size);

  ALWAYS_INLINE void Reset();

  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
  BASE_EXPORT static PartitionPage* get_sentinel_page();

  // Page State accessors.
  // Note that it's only valid to call these functions on pages found on one of
  // the page lists. Specifically, you can't call these functions on full pages
  // that were detached from the active list.
  //
  // This restriction provides the flexibility for some of the status fields to
  // be repurposed when a page is taken off a list. See the negation of
  // |num_allocated_slots| when a full page is removed from the active list
  // for an example of such repurposing.
  ALWAYS_INLINE bool is_active() const;
  ALWAYS_INLINE bool is_full() const;
  ALWAYS_INLINE bool is_empty() const;
  ALWAYS_INLINE bool is_decommitted() const;

 private:
  // |sentinel_page_| is used as a sentinel to indicate that there is no page
  // in the active page list. We could use nullptr, but then we would need to
  // add a null-check branch to the hot allocation path, which we want to
  // avoid.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
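  // Because the sentinel lives in zero-initialized static storage, its
  // |freelist_head| reads as null, so the allocation fast path naturally
  // falls through to the slow path without any extra check.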
  static PartitionPage sentinel_page_;
};
static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");
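
// A quick sanity check of the assert above (a sketch; the exact values live
// in partition_alloc_constants.h): with the usual kPageMetadataShift of 5, a
// metadata slot is 32 bytes, and on 64-bit targets the struct is three
// 8-byte pointers plus four 16-bit fields, i.e. 24 + 8 == 32 bytes.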

ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
  DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
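
// For orientation, the super page layout that this one-page offset relies on
// (a sketch; the authoritative constants are in partition_alloc_constants.h):
//
//   | Guard page | Metadata page | Guard pages | Slot span | ... | Guard page |
//
// The metadata page holds one kPageMetadataSize-byte PartitionPage entry per
// partition page in the super page; the functions below index into it.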

ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
    void* ptr) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
  char* super_page_ptr =
      reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
  uintptr_t partition_page_index =
      (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
  // Index 0 is invalid because it is the metadata and guard area and
  // the last index is invalid because it is a guard page.
  DCHECK(partition_page_index);
  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
  PartitionPage* page = reinterpret_cast<PartitionPage*>(
      PartitionSuperPageToMetadataArea(super_page_ptr) +
      (partition_page_index << kPageMetadataShift));
  // Partition pages in the same slot span can share the same page object.
  // Adjust for that.
  size_t delta = page->page_offset << kPageMetadataShift;
  page =
      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
  return page;
}
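
// A worked example of the lookup above, assuming the usual constants (2MiB
// super pages, 16KiB partition pages, 32-byte metadata entries): for a slot
// at super_page + 0x8000, partition_page_index is 0x8000 >> 14 == 2, so its
// metadata entry is at super_page + kSystemPageSize + (2 << 5). If that
// partition page is the second page of its slot span, page_offset == 1 and
// the final subtraction steps back to the span's first, canonical entry.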

// Returns the start of the slot span for the PartitionPage.
ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);

  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

  // A valid |page| must be past the first guard system page and within
  // the following metadata region.
  DCHECK(super_page_offset > kSystemPageSize);
  // Must be less than total metadata region.
  DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
                                                kPageMetadataSize));
  uintptr_t partition_page_index =
      (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
  // Index 0 is invalid because it is the superpage extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages for the metadata region.
  DCHECK(partition_page_index);
  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
  void* ret = reinterpret_cast<void*>(
      super_page_base + (partition_page_index << kPartitionPageShift));
  return ret;
}

ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
  // Checks that the pointer's offset from the start of the slot span is a
  // multiple of the slot size.
  DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
            reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
           page->bucket->slot_size));
  return page;
}
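
// Note the round-trip property the DCHECK above relies on: ToPointer(page)
// returns the first slot of the span, so any well-formed interior pointer
// differs from it by a whole number of slots, and
// FromPointer(ToPointer(page)) == page because the span's first partition
// page has page_offset == 0.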

ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
  // For single-slot buckets which span more than one partition page, we
  // have some spare metadata space to store the raw allocation size. We
  // can use this to report better statistics.
  if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
    return nullptr;

  DCHECK((bucket->slot_size % kSystemPageSize) == 0);
  DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);

  const PartitionPage* the_next_page = this + 1;
  return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
}
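
// Why |this + 1| is spare storage: a slot span covering multiple partition
// pages owns one metadata entry per page, but only the first entry is a live
// PartitionPage; the trailing entries exist only to carry |page_offset| back
// to it (see FromPointerNoAlignmentCheck() above). The second entry's
// |freelist_head| field is therefore unused and can hold the raw size.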

ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
  const size_t* ptr = get_raw_size_ptr();
  if (UNLIKELY(ptr != nullptr))
    return *ptr;
  return 0;
}

ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
#if DCHECK_IS_ON()
  size_t slot_size = bucket->slot_size;
  const size_t raw_size = get_raw_size();
  if (raw_size) {
    slot_size = raw_size;
  }

  // If these asserts fire, you probably corrupted memory.
  PartitionCookieCheckValue(ptr);
  PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
                            kCookieSize);

  memset(ptr, kFreedByte, slot_size);
#endif

  DCHECK(num_allocated_slots);
  // Catches an immediate double free.
  CHECK(ptr != freelist_head);
  // Look for double free one level deeper in debug.
  DCHECK(!freelist_head ||
         ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
  internal::PartitionFreelistEntry* entry =
      static_cast<internal::PartitionFreelistEntry*>(ptr);
  entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
  freelist_head = entry;
  --num_allocated_slots;
  if (UNLIKELY(num_allocated_slots <= 0)) {
    FreeSlowPath();
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the size metadata.
    DCHECK(get_raw_size() == 0);
  }
}
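
// An illustrative trace of the fast path above (a sketch): freeing slot P
// while the freelist holds A -> B pushes P at the head:
//   entry->next = Encode(A);   // freelist becomes P -> A -> B
//   freelist_head = P;
// Links are stored encoded (see PartitionFreelistEntry), which is why the
// double-free DCHECK must Decode() before comparing. num_allocated_slots
// reaching <= 0 (zero for a normal span, negative for a detached full page
// whose count was negated) diverts to FreeSlowPath().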

ALWAYS_INLINE bool PartitionPage::is_active() const {
  DCHECK(this != get_sentinel_page());
  DCHECK(!page_offset);
  return (num_allocated_slots > 0 &&
          (freelist_head || num_unprovisioned_slots));
}

ALWAYS_INLINE bool PartitionPage::is_full() const {
  DCHECK(this != get_sentinel_page());
  DCHECK(!page_offset);
  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
  if (ret) {
    DCHECK(!freelist_head);
    DCHECK(!num_unprovisioned_slots);
  }
  return ret;
}

ALWAYS_INLINE bool PartitionPage::is_empty() const {
  DCHECK(this != get_sentinel_page());
  DCHECK(!page_offset);
  return (!num_allocated_slots && freelist_head);
}

ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
  DCHECK(this != get_sentinel_page());
  DCHECK(!page_offset);
  bool ret = (!num_allocated_slots && !freelist_head);
  if (ret) {
    DCHECK(!num_unprovisioned_slots);
    DCHECK(empty_cache_index == -1);
  }
  return ret;
}

ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
  size_t* raw_size_ptr = get_raw_size_ptr();
  if (UNLIKELY(raw_size_ptr != nullptr))
    *raw_size_ptr = size;
}

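// Reset() re-initializes a decommitted page for reuse: every slot becomes
// unprovisioned again, and the freelist is rebuilt lazily as the bucket
// provisions slots.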
ALWAYS_INLINE void PartitionPage::Reset() {
  DCHECK(is_decommitted());

  num_unprovisioned_slots = bucket->get_slots_per_span();
  DCHECK(num_unprovisioned_slots);

  next_page = nullptr;
}

}  // namespace internal
}  // namespace base
}  // namespace pdfium

#endif  // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_