// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_page.h"

#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_root_base.h"

namespace pdfium {
namespace base {
namespace internal {

namespace {

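// Unmaps a direct-mapped allocation: unlinks its extent from the root's list
// of direct mappings, updates the committed-page accounting, and returns the
// whole mapping (including the preceding partition page and trailing guard
// page) to the system.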
ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
  PartitionRootBase* root = PartitionRootBase::FromPage(page);
  const PartitionDirectMapExtent* extent =
      PartitionDirectMapExtent::FromPage(page);
  size_t unmap_size = extent->map_size;

  // Maintain the doubly-linked list of all direct mappings.
  if (extent->prev_extent) {
    DCHECK(extent->prev_extent->next_extent == extent);
    extent->prev_extent->next_extent = extent->next_extent;
  } else {
    root->direct_map_list = extent->next_extent;
  }
  if (extent->next_extent) {
    DCHECK(extent->next_extent->prev_extent == extent);
    extent->next_extent->prev_extent = extent->prev_extent;
  }

  // Add on the size of the trailing guard page and preceding partition
  // page.
  unmap_size += kPartitionPageSize + kSystemPageSize;

  size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
  root->DecreaseCommittedPages(uncommitted_page_size);
  DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
  root->total_size_of_direct_mapped_pages -= uncommitted_page_size;

  DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));

  char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
  // Account for the mapping starting a partition page before the actual
  // allocation address.
  ptr -= kPartitionPageSize;

  FreePages(ptr, unmap_size);
}

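// Places a newly-empty page into the root's ring of recently-emptied pages,
// decommitting whichever page previously occupied that ring slot.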
ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
  DCHECK(page->is_empty());
  PartitionRootBase* root = PartitionRootBase::FromPage(page);

  // If the page is already registered as empty, give it another life.
  if (page->empty_cache_index != -1) {
    DCHECK(page->empty_cache_index >= 0);
    DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
    DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
    root->global_empty_page_ring[page->empty_cache_index] = nullptr;
  }

  int16_t current_index = root->global_empty_page_ring_index;
  PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index];
  // The page might well have been re-activated, filled up, etc. before we get
  // around to looking at it here.
  if (page_to_decommit)
    page_to_decommit->DecommitIfPossible(root);

  // We put the empty slot span on our global list of "pages that were once
  // empty", thus providing it a bit of breathing room to get re-used before
  // we really free it. This improves performance, particularly on Mac OS X,
  // which has subpar memory management performance.
  root->global_empty_page_ring[current_index] = page;
  page->empty_cache_index = current_index;
  ++current_index;
  if (current_index == kMaxFreeableSpans)
    current_index = 0;
  root->global_empty_page_ring_index = current_index;
}

}  // namespace

// static
PartitionPage PartitionPage::sentinel_page_;

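// The sentinel page stands in for a bucket's active_pages_head when there is
// no real active page; FreeSlowPath() checks against it below and must never
// be invoked on the sentinel itself.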
PartitionPage* PartitionPage::get_sentinel_page() {
  return &sentinel_page_;
}

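// Handles the uncommon outcomes of freeing a slot: either the page has become
// completely unused (and can be unmapped or registered as empty), or a
// previously-full page has become partially used again.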
void PartitionPage::FreeSlowPath() {
  DCHECK(this != get_sentinel_page());
  if (LIKELY(num_allocated_slots == 0)) {
    // Page became fully unused.
    if (UNLIKELY(bucket->is_direct_mapped())) {
      PartitionDirectUnmap(this);
      return;
    }
    // If it's the current active page, change it. We bounce the page to
    // the empty list as a force towards defragmentation.
    if (LIKELY(this == bucket->active_pages_head))
      bucket->SetNewActivePage();
    DCHECK(bucket->active_pages_head != this);

    set_raw_size(0);
    DCHECK(!get_raw_size());

    PartitionRegisterEmptyPage(this);
  } else {
    DCHECK(!bucket->is_direct_mapped());
    // Ensure that the page is full. That's the only valid case if we
    // arrive here.
    DCHECK(num_allocated_slots < 0);
    // A transition of num_allocated_slots from 0 to -1 is not legal, and
    // likely indicates a double-free.
    CHECK(num_allocated_slots != -1);
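    // A full span stores num_allocated_slots negated; the count was also
    // decremented once for the slot just freed (hence the 0 -> -1 check
    // above), so undoing the encoding leaves slots_per_span - 1 allocated
    // slots, as the DCHECK below verifies.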
    num_allocated_slots = -num_allocated_slots - 2;
    DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
    // Fully used page became partially used. It must be put back on the
    // non-full page list. Also make it the current page to increase the
    // chances of it being filled up again. The old current page will be
    // the next page.
    DCHECK(!next_page);
    if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
      next_page = bucket->active_pages_head;
    bucket->active_pages_head = this;
    --bucket->num_full_pages;
    // Special case: for a partition page with just a single slot, it may
    // now be empty and we want to run it through the empty logic.
    if (UNLIKELY(num_allocated_slots == 0))
      FreeSlowPath();
  }
}

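// Returns an empty (non-direct-mapped) page's committed system pages to the
// OS and marks its metadata as decommitted.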
void PartitionPage::Decommit(PartitionRootBase* root) {
  DCHECK(is_empty());
  DCHECK(!bucket->is_direct_mapped());
  void* addr = PartitionPage::ToPointer(this);
  root->DecommitSystemPages(addr, bucket->get_bytes_per_span());

  // We actually leave the decommitted page in the active list. We'll sweep
  // it on to the decommitted page list when we next walk the active page
  // list.
  // Pulling this trick enables us to use a singly-linked page list for all
  // cases, which is critical in keeping the page metadata structure down to
  // 32 bytes in size.
  freelist_head = nullptr;
  num_unprovisioned_slots = 0;
  DCHECK(is_decommitted());
}

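// Drops this page from the root's empty-page ring and decommits it if it is
// still empty. The page may have been reused since it was registered, in
// which case there is nothing to decommit.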
void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
  DCHECK(empty_cache_index >= 0);
  DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
  DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
  empty_cache_index = -1;
  if (is_empty())
    Decommit(root);
}

}  // namespace internal
}  // namespace base
}  // namespace pdfium