// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-allocator.h"

#include <cinttypes>

#include "src/base/address-region.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
    LAZY_INSTANCE_INITIALIZER;

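// Returns a hint for where to place a new code range reservation. Recently
// freed code ranges of the same size are reused when available; otherwise a
// random mmap address is returned.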
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  auto it = recently_freed_.find(code_range_size);
  if (it == recently_freed_.end() || it->second.empty()) {
    return reinterpret_cast<Address>(GetRandomMmapAddr());
  }
  Address result = it->second.back();
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
                                 size_t code_range_size)
    : isolate_(isolate),
      data_page_allocator_(isolate->page_allocator()),
      code_page_allocator_(nullptr),
      capacity_(RoundUp(capacity, Page::kPageSize)),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
}

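// Sets up the page allocator used for executable memory. When the isolate
// requires a code range, a contiguous region of virtual address space is
// reserved and wrapped in a BoundedPageAllocator; otherwise code pages are
// allocated from the regular data page allocator.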
void MemoryAllocator::InitializeCodePageAllocator(
    v8::PageAllocator* page_allocator, size_t requested) {
  DCHECK_NULL(code_page_allocator_instance_.get());

  code_page_allocator_ = page_allocator;

  if (requested == 0) {
    if (!isolate_->RequiresCodeRange()) return;
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    requested = kMaximalCodeRangeSize;
  } else if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fulfilling both the reserved pages requirement and huge code area
    // alignment is not supported (it would require re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);

  Address hint =
      RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
                page_allocator->AllocatePageSize());
  VirtualMemory reservation(
      page_allocator, requested, reinterpret_cast<void*>(hint),
      Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
  if (!reservation.IsReserved()) {
    V8::FatalProcessOutOfMemory(isolate_,
                                "CodeRange setup: allocate virtual memory");
  }
  code_range_ = reservation.region();
  isolate_->AddCodeRange(code_range_.begin(), code_range_.size());

  // We are sure that we have mapped a block of the requested size.
  DCHECK_GE(reservation.size(), requested);
  Address base = reservation.address();

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  //   https://cs.chromium.org/chromium/src/components/crash/content/
  //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation.SetPermissions(base, reserved_area,
                                    PageAllocator::kReadWrite))
      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");

    base += reserved_area;
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size =
      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
                MemoryChunk::kPageSize);
  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));

  LOG(isolate_,
      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
               requested));

  code_reservation_ = std::move(reservation);
  code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
      page_allocator, aligned_base, size,
      static_cast<size_t>(MemoryChunk::kAlignment));
  code_page_allocator_ = code_page_allocator_instance_.get();
}

void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_, 0u);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Free();
  }

  if (code_page_allocator_instance_.get()) {
    DCHECK(!code_range_.is_empty());
    code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
                                                            code_range_.size());
    code_range_ = base::AddressRegion();
    code_page_allocator_instance_.reset();
  }
  code_page_allocator_ = nullptr;
  data_page_allocator_ = nullptr;
}

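// Background job that drains the unmapper's chunk queues. It runs with
// user-visible priority, uncommits pooled pages, and yields between chunks
// when the job delegate requests it.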
class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
 public:
  explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
      : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}

  void Run(JobDelegate* delegate) override {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
        delegate);
    if (FLAG_trace_unmapper) {
      PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
    }
  }

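  // Request roughly one worker per kTaskPerChunk queued chunks (in addition
  // to the current workers), capped at kMaxUnmapperTasks.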
  size_t GetMaxConcurrency(size_t worker_count) const override {
    const size_t kTaskPerChunk = 8;
    return std::min<size_t>(
        kMaxUnmapperTasks,
        worker_count +
            (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
                kTaskPerChunk);
  }

 private:
  Unmapper* const unmapper_;
  GCTracer* const tracer_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryJob);
};

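// Frees queued chunks either on a background job (when concurrent sweeping is
// enabled and the heap is not tearing down) or synchronously on the calling
// thread.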
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
    if (job_handle_ && job_handle_->IsValid()) {
      job_handle_->NotifyConcurrencyIncrease();
    } else {
      job_handle_ = V8::GetCurrentPlatform()->PostJob(
          TaskPriority::kUserVisible,
          std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
      if (FLAG_trace_unmapper) {
        PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
      }
    }
  } else {
    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
  }
}

void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();

  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
  }
}

void MemoryAllocator::Unmapper::PrepareForGC() {
  // Free non-regular chunks because they cannot be re-used.
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  CancelAndWaitForPendingTasks();
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
    JobDelegate* delegate) {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
}

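// Drains the regular chunk queue, re-pooling chunks marked POOLED. In
// kReleasePooled mode the pooled pages themselves are also released back to
// the OS. Finishes by draining the non-regular queue.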
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
    JobDelegate* delegate) {
  MemoryChunk* chunk = nullptr;
  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kReleasePooled we need to free them
    // though.
    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
      if (delegate && delegate->ShouldYield()) return;
    }
  }
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::TearDown() {
  CHECK(!job_handle_ || !job_handle_->IsValid());
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
}

size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  base::MutexGuard guard(&mutex_);

  size_t sum = 0;
  // kPooled chunks are already uncommitted. We only have to account for
  // kRegular and kNonRegular chunks.
  for (auto& chunk : chunks_[kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
}

bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
  Address base = reservation->address();
  size_t size = reservation->size();
  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
  size_t size = reservation->size();
  if (!reservation->SetPermissions(reservation->address(), size,
                                   PageAllocator::kNoAccess)) {
    return false;
  }
  return true;
}

void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
                                 Address base, size_t size) {
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}

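// Reserves reserve_size bytes aligned to |alignment| and commits the first
// commit_size bytes. Executable mappings additionally get guard pages via
// CommitExecutableMemory. Returns kNullAddress and frees the reservation on
// failure.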
Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
  if (!reservation.IsReserved()) return kNullAddress;
  Address base = reservation.address();
  size_ += reservation.size();

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = kNullAddress;
    }
  } else {
    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = kNullAddress;
    }
  }

  if (base == kNullAddress) {
    // Failed to commit the body. Free the mapping and any partially committed
    // regions inside it.
    reservation.Free();
    size_ -= reserve_size;
    return kNullAddress;
  }

  *controller = std::move(reservation);
  return base;
}

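// Allocates and initializes the backing memory for a single chunk; see the
// layout diagrams below for the executable and non-executable cases.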
V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
    size_t reserve_area_size, size_t commit_area_size, Executability executable,
    BaseSpace* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = kNullAddress;
  VirtualMemory reservation;
  Address area_start = kNullAddress;
  Address area_end = kNullAddress;
  void* address_hint =
      AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + area_start_)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                               reserve_area_size +
                               MemoryChunkLayout::CodePageGuardSize(),
                           GetCommitPageSize());

    // Size of header (not executable) plus area (executable).
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);
    if (base == kNullAddress) return nullptr;
    // Update executable memory size.
    size_executable_ += reservation.size();

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               commit_area_size, kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
        GetCommitPageSize());
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);

    if (base == kNullAddress) return nullptr;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(
          base,
          MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
          kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics because we treat reserved but not-yet
  // committed memory regions of chunks as allocated.
  LOG(isolate_,
      NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((base + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_ = std::move(reservation);
    UncommitMemory(&last_chunk_);
    size_ -= chunk_size;
    if (executable == EXECUTABLE) {
      size_executable_ -= chunk_size;
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
                              owner);
  }

  BasicMemoryChunk* chunk =
      BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                   owner, std::move(reservation));

  return chunk;
}

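// Allocates a full MemoryChunk on top of a BasicMemoryChunk and registers it
// as executable memory if needed.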
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            BaseSpace* owner) {
  BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
      reserve_area_size, commit_area_size, executable, owner);

  if (basic_chunk == nullptr) return nullptr;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);

  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
  return chunk;
}

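// Shrinks a chunk by releasing the unused tail starting at |start_free|.
// For executable chunks a new trailing guard page is installed first.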
void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
                                        Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->set_size(chunk->size() - bytes_to_free);
  chunk->set_area_end(new_area_end);
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    // Add guard page at the end.
    size_t page_size = GetCommitPageSize();
    DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
    DCHECK_EQ(chunk->address() + chunk->size(),
              chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    reservation->SetPermissions(chunk->area_end(), page_size,
                                PageAllocator::kNoAccess);
  }
  // On e.g. Windows, a reservation may be larger than a page and releasing
  // partially starting at |start_free| will also release the potentially
  // unused part behind the current page.
  const size_t released_bytes = reservation->Release(start_free);
  DCHECK_GE(size_, released_bytes);
  size_ -= released_bytes;
}

void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));
  size_ -= size;
}

void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
                                       Executability executable) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));

  size_ -= size;
  if (executable == EXECUTABLE) {
    DCHECK_GE(size_executable_, size);
    size_executable_ -= size;
    UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
  }
  chunk->SetFlag(MemoryChunk::UNREGISTERED);
}

void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
  UnregisterMemory(chunk, chunk->executable());
}

void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  UnregisterSharedMemory(chunk);

  v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    reservation->FreeReadOnly();
  } else {
    // Only read-only pages can have a non-initialized reservation object. This
    // happens when the pages are remapped to multiple locations, in which case
    // the reservation would be invalid.
    FreeMemory(allocator, chunk->address(),
               RoundUp(chunk->size(), allocator->AllocatePageSize()));
  }
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  UnregisterMemory(chunk);
  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());
  chunk->SetFlag(MemoryChunk::PRE_FREED);
}

void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  DCHECK(!chunk->InReadOnlySpace());
  chunk->ReleaseAllAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitMemory(reservation);
  } else {
    DCHECK(reservation->IsReserved());
    reservation->Free();
  }
}

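// Frees a chunk according to the requested mode:
//   kFull            - unregister and free immediately on this thread.
//   kAlreadyPooled   - release the (already uncommitted) pooled page memory.
//   kPooledAndQueue  - mark as POOLED, then queue for the unmapper.
//   kPreFreeAndQueue - unregister now, queue the actual freeing for the
//                      unmapper.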
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kAlreadyPooled:
      // Pooled pages cannot be touched anymore as their memory is uncommitted.
      // Pooled pages are non-executable.
      FreeMemory(data_page_allocator(), chunk->address(),
                 static_cast<size_t>(MemoryChunk::kPageSize));
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      V8_FALLTHROUGH;
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kFull>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);

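// Allocates a page for |owner|. In kPooled mode a previously pooled chunk is
// reused if available; otherwise a fresh chunk of |size| bytes is allocated.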
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    DCHECK_EQ(size, static_cast<size_t>(
                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                            owner->identity())));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk);
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
                                                    ReadOnlySpace* owner) {
  BasicMemoryChunk* chunk = nullptr;
  if (chunk == nullptr) {
    chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk);
}

std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
MemoryAllocator::RemapSharedPage(
    ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) {
  return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}

LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable);
}

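// Takes an uncommitted chunk from the unmapper's pool, recommits its memory,
// and reinitializes it as a regular data page for |owner|. Returns nullptr if
// the pool is empty or committing fails.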
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start =
      start +
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
  const Address area_end = start + size;
  // Pooled pages are always regular data pages.
  DCHECK_NE(CODE_SPACE, owner->identity());
  VirtualMemory reservation(data_page_allocator(), start, size);
  if (!CommitMemory(&reservation)) return nullptr;
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }
  BasicMemoryChunk* basic_chunk =
      BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
                                   area_end, owner, std::move(reservation));
  MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
  size_ += size;
  return chunk;
}

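// Overwrites [start, start + size) with |zap_value| tagged words so that
// stale memory is easy to recognize.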
void MemoryAllocator::ZapBlock(Address start, size_t size,
                               uintptr_t zap_value) {
  DCHECK(IsAligned(start, kTaggedSize));
  DCHECK(IsAligned(size, kTaggedSize));
  MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
               size >> kTaggedSizeLog2);
}

intptr_t MemoryAllocator::GetCommitPageSize() {
  if (FLAG_v8_os_page_size != 0) {
    DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    return FLAG_v8_os_page_size * KB;
  } else {
    return CommitPageSize();
  }
}

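// Computes the largest page-aligned sub-region of a free-space block that can
// be discarded (returned to the OS) while keeping the FreeSpace filler object
// at |addr| intact, i.e. the whole commit pages strictly inside
// [addr + FreeSpace::kSize, addr + size).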
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
                                                              size_t size) {
  size_t page_size = MemoryAllocator::GetCommitPageSize();
  if (size < page_size + FreeSpace::kSize) {
    return base::AddressRegion(0, 0);
  }
  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
  Address discardable_end = RoundDown(addr + size, page_size);
  if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
  return base::AddressRegion(discardable_start,
                             discardable_end - discardable_start);
}

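// Commits the header, guard pages and code area of an executable reservation:
// the header becomes read-write, the pre- and post-code guard pages stay
// inaccessible, and the code body is committed read-write. On failure all
// permissions set so far are rolled back to kNoAccess.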
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  const size_t page_size = GetCommitPageSize();
  // All addresses and sizes must be aligned to the commit page size.
  DCHECK(IsAligned(start, page_size));
  DCHECK_EQ(0, commit_size % page_size);
  DCHECK_EQ(0, reserved_size % page_size);
  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
  const size_t code_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInCodePage();
  // reserved_size includes two guard regions, commit_size does not.
  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
  const Address pre_guard_page = start + pre_guard_offset;
  const Address code_area = start + code_area_offset;
  const Address post_guard_page = start + reserved_size - guard_size;
  // Commit the non-executable header, from start to pre-code guard page.
  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    // Create the pre-code guard page, following the header.
    if (vm->SetPermissions(pre_guard_page, page_size,
                           PageAllocator::kNoAccess)) {
      // Commit the executable code body.
      if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
                             PageAllocator::kReadWrite)) {
        // Create the post-code guard page.
        if (vm->SetPermissions(post_guard_page, page_size,
                               PageAllocator::kNoAccess)) {
          UpdateAllocatedSpaceLimits(start, code_area + commit_size);
          return true;
        }
        vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
      }
    }
    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
  }
  return false;
}

}  // namespace internal
}  // namespace v8