// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-allocator.h"

#include <cinttypes>

#include "src/base/address-region.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// MemoryAllocator
//

size_t MemoryAllocator::commit_page_size_ = 0;
size_t MemoryAllocator::commit_page_size_bits_ = 0;

MemoryAllocator::MemoryAllocator(Isolate* isolate,
                                 v8::PageAllocator* code_page_allocator,
                                 size_t capacity)
    : isolate_(isolate),
      data_page_allocator_(isolate->page_allocator()),
      code_page_allocator_(code_page_allocator),
      capacity_(RoundUp(capacity, Page::kPageSize)),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
  DCHECK_NOT_NULL(code_page_allocator);
}

void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_, 0u);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (reserved_chunk_at_virtual_memory_limit_) {
    reserved_chunk_at_virtual_memory_limit_->Free();
  }

  code_page_allocator_ = nullptr;
  data_page_allocator_ = nullptr;
}

class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
 public:
  explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
      : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}

  UnmapFreeMemoryJob(const UnmapFreeMemoryJob&) = delete;
  UnmapFreeMemoryJob& operator=(const UnmapFreeMemoryJob&) = delete;

  void Run(JobDelegate* delegate) override {
    if (delegate->IsJoiningThread()) {
      TRACE_GC(tracer_, GCTracer::Scope::UNMAPPER);
      RunImpl(delegate);

    } else {
      TRACE_GC1(tracer_, GCTracer::Scope::BACKGROUND_UNMAPPER,
                ThreadKind::kBackground);
      RunImpl(delegate);
    }
  }

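  // Suggests one worker per kTaskPerChunk queued chunks in addition to the
  // already active workers, capped at kMaxUnmapperTasks.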
  size_t GetMaxConcurrency(size_t worker_count) const override {
    const size_t kTaskPerChunk = 8;
    return std::min<size_t>(
        kMaxUnmapperTasks,
        worker_count +
            (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
                kTaskPerChunk);
  }

 private:
  void RunImpl(JobDelegate* delegate) {
    unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled,
                                               delegate);
    if (FLAG_trace_unmapper) {
      PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
    }
  }
  Unmapper* const unmapper_;
  GCTracer* const tracer_;
};

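// Frees queued chunks concurrently where possible: an existing unmapper job
// is notified of additional work, otherwise a new UnmapFreeMemoryJob is
// posted. During tear-down, or when concurrent sweeping is disabled, the
// queued chunks are freed synchronously on the current thread instead.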
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
    if (job_handle_ && job_handle_->IsValid()) {
      job_handle_->NotifyConcurrencyIncrease();
    } else {
      job_handle_ = V8::GetCurrentPlatform()->PostJob(
          TaskPriority::kUserVisible,
          std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
      if (FLAG_trace_unmapper) {
        PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
      }
    }
  } else {
    PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled);
  }
}

void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();

  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
  }
}

void MemoryAllocator::Unmapper::PrepareForGC() {
  // Free non-regular chunks because they cannot be re-used.
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  CancelAndWaitForPendingTasks();
  PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
    JobDelegate* delegate) {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kNonRegular)) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
}

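// Drains the chunk queues: regular chunks are freed (and re-queued as pooled
// if flagged POOLED), pooled chunks are additionally released to the OS when
// |mode| is kFreePooled, and non-regular chunks are always freed. A non-null
// |delegate| allows yielding between chunks.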
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
    MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) {
  MemoryChunk* chunk = nullptr;
  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular)) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe(ChunkQueueType::kPooled, chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kFreePooled we need to free them as
    // well.
    while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled)) != nullptr) {
      allocator_->FreePooledChunk(chunk);
      if (delegate && delegate->ShouldYield()) return;
    }
  }
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::TearDown() {
  CHECK(!job_handle_ || !job_handle_->IsValid());
  PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
  for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[ChunkQueueType::kRegular].size() +
         chunks_[ChunkQueueType::kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
}

size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  base::MutexGuard guard(&mutex_);

  size_t sum = 0;
  // kPooled chunks are already uncommitted. We only have to account for
  // kRegular and kNonRegular chunks.
  for (auto& chunk : chunks_[ChunkQueueType::kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[ChunkQueueType::kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
}

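// Commits the whole reservation read-write and widens the allocated-space
// limits tracked by this allocator.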
bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
  Address base = reservation->address();
  size_t size = reservation->size();
  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
  size_t size = reservation->size();
  if (!reservation->SetPermissions(reservation->address(), size,
                                   PageAllocator::kNoAccess)) {
    return false;
  }
  return true;
}

void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
                                       Address base, size_t size) {
  FreePages(page_allocator, reinterpret_cast<void*>(base), size);
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t chunk_size, size_t area_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK_LT(area_size, chunk_size);

  VirtualMemory reservation(page_allocator, chunk_size, hint, alignment);
  if (!reservation.IsReserved()) return HandleAllocationFailure();

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((reservation.address() + static_cast<Address>(chunk_size)) == 0u) {
    CHECK(!reserved_chunk_at_virtual_memory_limit_);
    reserved_chunk_at_virtual_memory_limit_ = std::move(reservation);
    CHECK(reserved_chunk_at_virtual_memory_limit_);

    // Retry reserve virtual memory.
    reservation = VirtualMemory(page_allocator, chunk_size, hint, alignment);
    if (!reservation.IsReserved()) return HandleAllocationFailure();
  }

  Address base = reservation.address();

  if (executable == EXECUTABLE) {
    const size_t aligned_area_size = ::RoundUp(area_size, GetCommitPageSize());
    if (!SetPermissionsOnExecutableMemoryChunk(&reservation, base,
                                               aligned_area_size, chunk_size)) {
      return HandleAllocationFailure();
    }
  } else {
    // No guard page between page header and object area. This allows us to
    // make all OS pages for both regions readable+writable at once.
    const size_t commit_size =
        ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
                  GetCommitPageSize());

    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      return HandleAllocationFailure();
    }
  }

  *controller = std::move(reservation);
  return base;
}

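// Chunk allocation failures are fatal while deserialization is still in
// progress; afterwards kNullAddress is returned so that callers can handle
// the failure themselves.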
Address MemoryAllocator::HandleAllocationFailure() {
  Heap* heap = isolate_->heap();
  if (!heap->deserialization_complete()) {
    heap->FatalProcessOutOfMemory(
        "MemoryChunk allocation failed during deserialization.");
  }
  return kNullAddress;
}

size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
                                         Executability executable) {
  if (executable == EXECUTABLE) {
    //
    // Executable
    // +----------------------------+<- base aligned at MemoryChunk::kAlignment
    // |           Header           |
    // +----------------------------+<- base + CodePageGuardStartOffset
    // |           Guard            |
    // +----------------------------+<- area_start_
    // |            Area            |
    // +----------------------------+<- area_end_ (area_start + area_size)
    // |   Committed but not used   |
    // +----------------------------+<- aligned at OS page boundary
    // |           Guard            |
    // +----------------------------+<- base + chunk_size
    //

    return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                         area_size + MemoryChunkLayout::CodePageGuardSize(),
                     GetCommitPageSize());
  }

  //
  // Non-executable
  // +----------------------------+<- base aligned at MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + area_start_)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + area_size)
  // |   Committed but not used   |
  // +----------------------------+<- base + chunk_size
  //
  DCHECK_EQ(executable, NOT_EXECUTABLE);

  return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
                   GetCommitPageSize());
}

base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
                                            Executability executable,
                                            PageSize page_size) {
#ifdef V8_COMPRESS_POINTERS
  // When pointer compression is enabled, spaces are expected to be at a
  // predictable address (see mkgrokdump) so we don't supply a hint and rely on
  // the deterministic behaviour of the BoundedPageAllocator.
  void* address_hint = nullptr;
#else
  void* address_hint = AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
                                      MemoryChunk::kAlignment);
#endif

  VirtualMemory reservation;
  size_t chunk_size = ComputeChunkSize(area_size, executable);
  DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);

  Address base =
      AllocateAlignedMemory(chunk_size, area_size, MemoryChunk::kAlignment,
                            executable, address_hint, &reservation);
  if (base == kNullAddress) return {};

  size_ += reservation.size();

  // Update executable memory size.
  if (executable == EXECUTABLE) {
    size_executable_ += reservation.size();
  }

  if (Heap::ShouldZapGarbage()) {
    if (executable == EXECUTABLE) {
      // Page header and object area are split by a guard page. Zap the page
      // header first.
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      // Now zap the object area.
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               area_size, kZapValue);
    } else {
      DCHECK_EQ(executable, NOT_EXECUTABLE);
      // Zap both page header and object area at once. No guard page
      // in-between.
      ZapBlock(base,
               MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
               kZapValue);
    }
  }

  LOG(isolate_,
      NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));

  Address area_start = base + MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
                                  space->identity());
  Address area_end = area_start + area_size;

  return MemoryChunkAllocationResult{
      reinterpret_cast<void*>(base), chunk_size, area_start, area_end,
      std::move(reservation),
  };
}

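// Releases the tail of |chunk| starting at |start_free| back to the OS and
// shrinks the chunk's area to end at |new_area_end|. For executable chunks a
// new trailing guard page is installed first.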
void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
                                        Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->set_size(chunk->size() - bytes_to_free);
  chunk->set_area_end(new_area_end);
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    // Add guard page at the end.
    size_t page_size = GetCommitPageSize();
    DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
    DCHECK_EQ(chunk->address() + chunk->size(),
              chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    reservation->SetPermissions(chunk->area_end(), page_size,
                                PageAllocator::kNoAccess);
  }
  // On e.g. Windows, a reservation may be larger than a page and releasing
  // partially starting at |start_free| will also release the potentially
  // unused part behind the current page.
  const size_t released_bytes = reservation->Release(start_free);
  DCHECK_GE(size_, released_bytes);
  size_ -= released_bytes;
}

void MemoryAllocator::UnregisterSharedBasicMemoryChunk(
    BasicMemoryChunk* chunk) {
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));
  size_ -= size;
}

void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
                                                 Executability executable) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));

  size_ -= size;
  if (executable == EXECUTABLE) {
    DCHECK_GE(size_executable_, size);
    size_executable_ -= size;
#ifdef DEBUG
    UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
#endif  // DEBUG
    chunk->heap()->UnregisterUnprotectedMemoryChunk(
        static_cast<MemoryChunk*>(chunk));
  }
  chunk->SetFlag(MemoryChunk::UNREGISTERED);
}

void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) {
  UnregisterBasicMemoryChunk(chunk, chunk->executable());
}

void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
  DCHECK(!page->executable());
  UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
}

void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  UnregisterSharedBasicMemoryChunk(chunk);

  v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    reservation->FreeReadOnly();
  } else {
    // Only read-only pages can have a non-initialized reservation object.
    // This happens when the pages are remapped to multiple locations, in
    // which case the reservation would be invalid.
    FreeMemoryRegion(allocator, chunk->address(),
                     RoundUp(chunk->size(), allocator->AllocatePageSize()));
  }
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  UnregisterMemoryChunk(chunk);
  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());
  chunk->SetFlag(MemoryChunk::PRE_FREED);
}

void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  DCHECK(!chunk->InReadOnlySpace());
  chunk->ReleaseAllAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitMemory(reservation);
  } else {
    DCHECK(reservation->IsReserved());
    reservation->Free();
  }
}

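// Frees |chunk| according to |mode|: immediately on this thread, or queued on
// the unmapper for concurrent freeing, optionally flagging the chunk as
// POOLED so that its memory can be reused for future page allocations.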
void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
  switch (mode) {
    case FreeMode::kImmediately:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case FreeMode::kConcurrentlyAndPool:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      V8_FALLTHROUGH;
    case FreeMode::kConcurrently:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}

void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
  // Pooled pages cannot be touched anymore as their memory is uncommitted.
  // Pooled pages are not-executable.
  FreeMemoryRegion(data_page_allocator(), chunk->address(),
                   static_cast<size_t>(MemoryChunk::kPageSize));
}

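// Allocates a regular page for |space|. With AllocationMode::kUsePool, a
// pooled chunk is recommitted if one is available; otherwise (and as a
// fallback) a fresh chunk is reserved. Returns nullptr on failure.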
Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
                                    Space* space, Executability executable) {
  size_t size =
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space->identity());
  base::Optional<MemoryChunkAllocationResult> chunk_info;
  if (alloc_mode == AllocationMode::kUsePool) {
    DCHECK_EQ(size, static_cast<size_t>(
                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                            space->identity())));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk_info = AllocateUninitializedPageFromPool(space);
  }

  if (!chunk_info) {
    chunk_info =
        AllocateUninitializedChunk(space, size, executable, PageSize::kRegular);
  }

  if (!chunk_info) return nullptr;

  Page* page = new (chunk_info->start) Page(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation), executable);

#ifdef DEBUG
  if (page->executable()) RegisterExecutableMemoryChunk(page);
#endif  // DEBUG

  space->InitializePage(page);
  return page;
}

ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space) {
  DCHECK_EQ(space->identity(), RO_SPACE);
  size_t size = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE);
  base::Optional<MemoryChunkAllocationResult> chunk_info =
      AllocateUninitializedChunk(space, size, NOT_EXECUTABLE,
                                 PageSize::kRegular);
  if (!chunk_info) return nullptr;
  return new (chunk_info->start) ReadOnlyPage(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation));
}

std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
MemoryAllocator::RemapSharedPage(
    ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) {
  return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}

LargePage* MemoryAllocator::AllocateLargePage(LargeObjectSpace* space,
                                              size_t object_size,
                                              Executability executable) {
  base::Optional<MemoryChunkAllocationResult> chunk_info =
      AllocateUninitializedChunk(space, object_size, executable,
                                 PageSize::kLarge);

  if (!chunk_info) return nullptr;

  LargePage* page = new (chunk_info->start) LargePage(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation), executable);

#ifdef DEBUG
  if (page->executable()) RegisterExecutableMemoryChunk(page);
#endif  // DEBUG

  return page;
}

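// Takes a chunk from the unmapper's pool and recommits it. Pooled chunks are
// always regular, non-executable data pages of MemoryChunk::kPageSize bytes.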
base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
MemoryAllocator::AllocateUninitializedPageFromPool(Space* space) {
  void* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return {};
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start =
      start +
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
  const Address area_end = start + size;
  // Pooled pages are always regular data pages.
  DCHECK_NE(CODE_SPACE, space->identity());
  VirtualMemory reservation(data_page_allocator(), start, size);
  if (!CommitMemory(&reservation)) return {};
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }

  size_ += size;
  return MemoryChunkAllocationResult{
      chunk, size, area_start, area_end, std::move(reservation),
  };
}

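// Fills [start, start + size) with tagged |zap_value| words so that stale
// memory is easy to recognize when debugging.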
void MemoryAllocator::ZapBlock(Address start, size_t size,
                               uintptr_t zap_value) {
  DCHECK(IsAligned(start, kTaggedSize));
  DCHECK(IsAligned(size, kTaggedSize));
  MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
               size >> kTaggedSizeLog2);
}

void MemoryAllocator::InitializeOncePerProcess() {
  commit_page_size_ =
      FLAG_v8_os_page_size > 0 ? FLAG_v8_os_page_size * KB : CommitPageSize();
  CHECK(base::bits::IsPowerOfTwo(commit_page_size_));
  commit_page_size_bits_ = base::bits::WhichPowerOfTwo(commit_page_size_);
}

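// Returns the largest commit-page-aligned region within [addr, addr + size)
// that can be discarded while keeping the first FreeSpace::kSize bytes
// intact, or an empty region if there is nothing to discard.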
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
                                                              size_t size) {
  size_t page_size = GetCommitPageSize();
  if (size < page_size + FreeSpace::kSize) {
    return base::AddressRegion(0, 0);
  }
  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
  Address discardable_end = RoundDown(addr + size, page_size);
  if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
  return base::AddressRegion(discardable_start,
                             discardable_end - discardable_start);
}

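// Establishes the permission layout of an executable chunk: a read-write
// header, no-access pre- and post-code guard pages, and a code area with the
// platform's code-modification permission. On failure, permissions are rolled
// back to no-access and false is returned.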
bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
                                                            Address start,
                                                            size_t area_size,
                                                            size_t chunk_size) {
  const size_t page_size = GetCommitPageSize();

  // All addresses and sizes must be aligned to the commit page size.
  DCHECK(IsAligned(start, page_size));
  DCHECK_EQ(0, area_size % page_size);
  DCHECK_EQ(0, chunk_size % page_size);

  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
  const size_t code_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInCodePage();

  DCHECK_EQ(pre_guard_offset + guard_size + area_size + guard_size, chunk_size);

  const Address pre_guard_page = start + pre_guard_offset;
  const Address code_area = start + code_area_offset;
  const Address post_guard_page = start + chunk_size - guard_size;

  // Commit the non-executable header, from start to pre-code guard page.
  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    // Create the pre-code guard page, following the header.
    if (vm->SetPermissions(pre_guard_page, page_size,
                           PageAllocator::kNoAccess)) {
      // Commit the executable code body.
      if (vm->SetPermissions(code_area, area_size,
                             MemoryChunk::GetCodeModificationPermission())) {
        // Create the post-code guard page.
        if (vm->SetPermissions(post_guard_page, page_size,
                               PageAllocator::kNoAccess)) {
          UpdateAllocatedSpaceLimits(start, code_area + area_size);
          return true;
        }

        vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
      }
    }

    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
  }

  return false;
}

}  // namespace internal
}  // namespace v8