// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/page-memory.h"

#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/platform.h"

namespace cppgc {
namespace internal {

namespace {

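// Makes the writeable region of |page_memory| accessible. If the allocator
// can commit at guard-page granularity, only the writeable region is switched
// to read/write; otherwise the overall region is (see below).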
void Unprotect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
               const PageMemory& page_memory) {
  if (SupportsCommittingGuardPages(allocator)) {
    if (!allocator.SetPermissions(page_memory.writeable_region().base(),
                                  page_memory.writeable_region().size(),
                                  PageAllocator::Permission::kReadWrite)) {
      oom_handler("Oilpan: Unprotecting memory.");
    }
  } else {
    // The allocator cannot commit at the guard-page granularity, so guard
    // pages cannot be protected individually. Instead, the overall region
    // (including guard pages) is switched to read/write.
    //
    // The allocator still needs to support committing the overall range.
    CHECK_EQ(0u,
             page_memory.overall_region().size() % allocator.CommitPageSize());
    if (!allocator.SetPermissions(page_memory.overall_region().base(),
                                  page_memory.overall_region().size(),
                                  PageAllocator::Permission::kReadWrite)) {
      oom_handler("Oilpan: Unprotecting memory.");
    }
  }
}

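// Revokes access to the writeable region of |page_memory|, mirroring
// Unprotect().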
void Protect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
             const PageMemory& page_memory) {
  if (SupportsCommittingGuardPages(allocator)) {
    // Swap permissions on the same region, providing the OS with a chance for
    // fast lookup and change.
    if (!allocator.SetPermissions(page_memory.writeable_region().base(),
                                  page_memory.writeable_region().size(),
                                  PageAllocator::Permission::kNoAccess)) {
      oom_handler("Oilpan: Protecting memory.");
    }
  } else {
    // See Unprotect().
    CHECK_EQ(0u,
             page_memory.overall_region().size() % allocator.CommitPageSize());
    if (!allocator.SetPermissions(page_memory.overall_region().base(),
                                  page_memory.overall_region().size(),
                                  PageAllocator::Permission::kNoAccess)) {
      oom_handler("Oilpan: Protecting memory.");
    }
  }
}

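// Reserves |allocation_size| bytes of address space with no access
// permissions. On failure the fatal OOM handler is invoked and is not
// expected to return.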
MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
                                 FatalOutOfMemoryHandler& oom_handler,
                                 size_t allocation_size) {
  void* region_memory =
      allocator.AllocatePages(nullptr, allocation_size, kPageSize,
                              PageAllocator::Permission::kNoAccess);
  if (!region_memory) {
    oom_handler("Oilpan: Reserving memory.");
  }
  const MemoryRegion reserved_region(static_cast<Address>(region_memory),
                                     allocation_size);
  DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
  return reserved_region;
}

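// Returns a reserved region to the OS.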
void FreeMemoryRegion(PageAllocator& allocator,
                      const MemoryRegion& reserved_region) {
  // Make sure pages returned to OS are unpoisoned.
  ASAN_UNPOISON_MEMORY_REGION(reserved_region.base(), reserved_region.size());
  allocator.FreePages(reserved_region.base(), reserved_region.size());
}

}  // namespace

PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
                                   FatalOutOfMemoryHandler& oom_handler,
                                   MemoryRegion reserved_region, bool is_large)
    : allocator_(allocator),
      oom_handler_(oom_handler),
      reserved_region_(reserved_region),
      is_large_(is_large) {}

PageMemoryRegion::~PageMemoryRegion() {
  FreeMemoryRegion(allocator_, reserved_region());
}

// static
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;

NormalPageMemoryRegion::NormalPageMemoryRegion(
    PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler)
    : PageMemoryRegion(
          allocator, oom_handler,
          ReserveMemoryRegion(allocator, oom_handler,
                              RoundUp(kPageSize * kNumPageRegions,
                                      allocator.AllocatePageSize())),
          false) {
#ifdef DEBUG
  for (size_t i = 0; i < kNumPageRegions; ++i) {
    DCHECK_EQ(false, page_memories_in_use_[i]);
  }
#endif  // DEBUG
}

NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;

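// Marks the page slot containing |writeable_base| as in use and makes its
// memory accessible.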
void NormalPageMemoryRegion::Allocate(Address writeable_base) {
  const size_t index = GetIndex(writeable_base);
  ChangeUsed(index, true);
  Unprotect(allocator_, oom_handler_, GetPageMemory(index));
}

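// Marks the page slot containing |writeable_base| as free again and revokes
// access to its memory.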
void NormalPageMemoryRegion::Free(Address writeable_base) {
  const size_t index = GetIndex(writeable_base);
  ChangeUsed(index, false);
  Protect(allocator_, oom_handler_, GetPageMemory(index));
}

void NormalPageMemoryRegion::UnprotectForTesting() {
  for (size_t i = 0; i < kNumPageRegions; ++i) {
    Unprotect(allocator_, oom_handler_, GetPageMemory(i));
  }
}

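// Reserves enough address space for the large payload plus a guard page on
// each side, rounded up to the allocator's allocation granularity.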
LargePageMemoryRegion::LargePageMemoryRegion(
    PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
    size_t length)
    : PageMemoryRegion(
          allocator, oom_handler,
          ReserveMemoryRegion(allocator, oom_handler,
                              RoundUp(length + 2 * kGuardPageSize,
                                      allocator.AllocatePageSize())),
          true) {}

LargePageMemoryRegion::~LargePageMemoryRegion() = default;

void LargePageMemoryRegion::UnprotectForTesting() {
  Unprotect(allocator_, oom_handler_, GetPageMemory());
}

PageMemoryRegionTree::PageMemoryRegionTree() = default;

PageMemoryRegionTree::~PageMemoryRegionTree() = default;

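// Registers |region| in the lookup tree, keyed by the base address of its
// reservation. USE() consumes the result in release builds, where DCHECK()
// compiles away.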
void PageMemoryRegionTree::Add(PageMemoryRegion* region) {
  DCHECK(region);
  auto result = set_.emplace(region->reserved_region().base(), region);
  USE(result);
  DCHECK(result.second);
}

void PageMemoryRegionTree::Remove(PageMemoryRegion* region) {
  DCHECK(region);
  auto size = set_.erase(region->reserved_region().base());
  USE(size);
  DCHECK_EQ(1u, size);
}

NormalPageMemoryPool::NormalPageMemoryPool() = default;

NormalPageMemoryPool::~NormalPageMemoryPool() = default;

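// Returns a freed page slot to the pool bucket so it can be reused by later
// allocations.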
void NormalPageMemoryPool::Add(size_t bucket, NormalPageMemoryRegion* pmr,
                               Address writeable_base) {
  DCHECK_LT(bucket, kNumPoolBuckets);
  pool_[bucket].push_back(std::make_pair(pmr, writeable_base));
}

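// Pops a pooled page slot from |bucket|, or {nullptr, nullptr} if the bucket
// is empty.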
std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
    size_t bucket) {
  DCHECK_LT(bucket, kNumPoolBuckets);
  if (pool_[bucket].empty()) return {nullptr, nullptr};
  std::pair<NormalPageMemoryRegion*, Address> pair = pool_[bucket].back();
  pool_[bucket].pop_back();
  return pair;
}

PageBackend::PageBackend(PageAllocator& allocator,
                         FatalOutOfMemoryHandler& oom_handler)
    : allocator_(allocator), oom_handler_(oom_handler) {}

PageBackend::~PageBackend() = default;

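// Hands out writeable memory for a normal page. The pool is consulted first;
// if it is empty, a fresh NormalPageMemoryRegion is reserved and all of its
// page slots are added to the bucket before one is taken.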
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
  v8::base::MutexGuard guard(&mutex_);
  std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
  if (!result.first) {
    auto pmr =
        std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
    for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
      page_pool_.Add(bucket, pmr.get(),
                     pmr->GetPageMemory(i).writeable_region().base());
    }
    page_memory_region_tree_.Add(pmr.get());
    normal_page_memory_regions_.push_back(std::move(pmr));
    result = page_pool_.Take(bucket);
    DCHECK(result.first);
  }
  result.first->Allocate(result.second);
  return result.second;
}

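// Protects the page memory again and returns the slot to the pool instead of
// unmapping it, keeping the reservation around for reuse.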
void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
  v8::base::MutexGuard guard(&mutex_);
  auto* pmr = static_cast<NormalPageMemoryRegion*>(
      page_memory_region_tree_.Lookup(writeable_base));
  pmr->Free(writeable_base);
  page_pool_.Add(bucket, pmr, writeable_base);
}

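// Large pages are not pooled: each allocation reserves a dedicated region,
// makes it accessible, and registers it for lookup.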
Address PageBackend::AllocateLargePageMemory(size_t size) {
  v8::base::MutexGuard guard(&mutex_);
  auto pmr =
      std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
  const PageMemory pm = pmr->GetPageMemory();
  Unprotect(allocator_, oom_handler_, pm);
  page_memory_region_tree_.Add(pmr.get());
  large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
  return pm.writeable_region().base();
}

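// Dropping the owning entry from |large_page_memory_regions_| destroys the
// region, which frees the underlying reservation.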
void PageBackend::FreeLargePageMemory(Address writeable_base) {
  v8::base::MutexGuard guard(&mutex_);
  PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
  page_memory_region_tree_.Remove(pmr);
  auto size = large_page_memory_regions_.erase(pmr);
  USE(size);
  DCHECK_EQ(1u, size);
}

}  // namespace internal
}  // namespace cppgc