1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/spaces.h"
6
7 #include <utility>
8
9 #include "src/base/bits.h"
10 #include "src/base/macros.h"
11 #include "src/base/platform/semaphore.h"
12 #include "src/base/template-utils.h"
13 #include "src/counters.h"
14 #include "src/heap/array-buffer-tracker.h"
15 #include "src/heap/concurrent-marking.h"
16 #include "src/heap/gc-tracer.h"
17 #include "src/heap/heap-controller.h"
18 #include "src/heap/incremental-marking.h"
19 #include "src/heap/mark-compact.h"
20 #include "src/heap/remembered-set.h"
21 #include "src/heap/slot-set.h"
22 #include "src/heap/sweeper.h"
23 #include "src/msan.h"
24 #include "src/objects-inl.h"
25 #include "src/objects/js-array-buffer-inl.h"
26 #include "src/objects/js-array-inl.h"
27 #include "src/snapshot/snapshot.h"
28 #include "src/v8.h"
29 #include "src/vm-state-inl.h"
30
31 namespace v8 {
32 namespace internal {
33
34 // ----------------------------------------------------------------------------
35 // HeapObjectIterator
36
37 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
38 : cur_addr_(kNullAddress),
39 cur_end_(kNullAddress),
40 space_(space),
41 page_range_(space->first_page(), nullptr),
42 current_page_(page_range_.begin()) {}
43
44 HeapObjectIterator::HeapObjectIterator(Page* page)
45 : cur_addr_(kNullAddress),
46 cur_end_(kNullAddress),
47 space_(reinterpret_cast<PagedSpace*>(page->owner())),
48 page_range_(page),
49 current_page_(page_range_.begin()) {
50 #ifdef DEBUG
51 Space* owner = page->owner();
52 DCHECK(owner == page->heap()->old_space() ||
53 owner == page->heap()->map_space() ||
54 owner == page->heap()->code_space() ||
55 owner == page->heap()->read_only_space());
56 #endif // DEBUG
57 }
58
59 // We have hit the end of the objects on the current page and should advance
60 // to the next page.
61 bool HeapObjectIterator::AdvanceToNextPage() {
62 DCHECK_EQ(cur_addr_, cur_end_);
63 if (current_page_ == page_range_.end()) return false;
64 Page* cur_page = *(current_page_++);
65 Heap* heap = space_->heap();
66
67 heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
68 #ifdef ENABLE_MINOR_MC
69 if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
70 heap->minor_mark_compact_collector()->MakeIterable(
71 cur_page, MarkingTreatmentMode::CLEAR,
72 FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
73 #else
74 DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
75 #endif // ENABLE_MINOR_MC
76 cur_addr_ = cur_page->area_start();
77 cur_end_ = cur_page->area_end();
78 DCHECK(cur_page->SweepingDone());
79 return true;
80 }
81
82 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
83 : heap_(heap) {
84 DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
85
86 for (SpaceIterator it(heap_); it.has_next();) {
87 it.next()->PauseAllocationObservers();
88 }
89 }
90
91 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
92 for (SpaceIterator it(heap_); it.has_next();) {
93 it.next()->ResumeAllocationObservers();
94 }
95 }
96
97 // -----------------------------------------------------------------------------
98 // CodeRange
99
100 static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
101 LAZY_INSTANCE_INITIALIZER;
102
103 CodeRange::CodeRange(Isolate* isolate, size_t requested)
104 : isolate_(isolate),
105 free_list_(0),
106 allocation_list_(0),
107 current_allocation_block_index_(0),
108 requested_code_range_size_(0) {
109 DCHECK(!virtual_memory_.IsReserved());
110
111 if (requested == 0) {
112 // When a target requires the code range feature, we put all code objects
113 // in a kMaximalCodeRangeSize range of virtual address space, so that
114 // they can call each other with near calls.
115 if (kRequiresCodeRange) {
116 requested = kMaximalCodeRangeSize;
117 } else {
118 return;
119 }
120 }
121
122 if (requested <= kMinimumCodeRangeSize) {
123 requested = kMinimumCodeRangeSize;
124 }
125
126 const size_t reserved_area =
127 kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
128 if (requested < (kMaximalCodeRangeSize - reserved_area))
129 requested += reserved_area;
130
131 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
132
133 requested_code_range_size_ = requested;
134
135 VirtualMemory reservation;
136 void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
137 if (!AlignedAllocVirtualMemory(
138 requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
139 &reservation)) {
140 V8::FatalProcessOutOfMemory(isolate,
141 "CodeRange setup: allocate virtual memory");
142 }
143
144 // We are sure that we have mapped a block of requested addresses.
145 DCHECK_GE(reservation.size(), requested);
146 Address base = reservation.address();
147
148 // On some platforms, specifically Win64, we need to reserve some pages at
149 // the beginning of an executable space.
150 if (reserved_area > 0) {
151 if (!reservation.SetPermissions(base, reserved_area,
152 PageAllocator::kReadWrite))
153 V8::FatalProcessOutOfMemory(isolate, "CodeRange setup: set permissions");
154
155 base += reserved_area;
156 }
157 Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
158 size_t size = reservation.size() - (aligned_base - base) - reserved_area;
159 allocation_list_.emplace_back(aligned_base, size);
160 current_allocation_block_index_ = 0;
161
162 LOG(isolate_,
163 NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
164 requested));
165 virtual_memory_.TakeControl(&reservation);
166 }
167
168 CodeRange::~CodeRange() {
169 if (virtual_memory_.IsReserved()) {
170 Address addr = start();
171 virtual_memory_.Free();
172 code_range_address_hint.Pointer()->NotifyFreedCodeRange(
173 reinterpret_cast<void*>(addr), requested_code_range_size_);
174 }
175 }
176
177 bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
178 const FreeBlock& right) {
179 return left.start < right.start;
180 }
181
182
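// Advances current_allocation_block_index_ to the next block that can hold
// |requested| bytes. If no remaining block is large enough, the free list is
// merged back into the allocation list: blocks are sorted by start address,
// address-adjacent blocks are coalesced, and the search restarts from the
// beginning. As an illustrative example (hypothetical addresses), free blocks
// [0x10000, +0x10000) and [0x20000, +0x20000) coalesce into a single block
// [0x10000, +0x30000). Returns false if the code range is full or too
// fragmented to satisfy the request.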
183 bool CodeRange::GetNextAllocationBlock(size_t requested) {
184 for (current_allocation_block_index_++;
185 current_allocation_block_index_ < allocation_list_.size();
186 current_allocation_block_index_++) {
187 if (requested <= allocation_list_[current_allocation_block_index_].size) {
188 return true; // Found a large enough allocation block.
189 }
190 }
191
192 // Sort and merge the free blocks on the free list and the allocation list.
193 free_list_.insert(free_list_.end(), allocation_list_.begin(),
194 allocation_list_.end());
195 allocation_list_.clear();
196 std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
197 for (size_t i = 0; i < free_list_.size();) {
198 FreeBlock merged = free_list_[i];
199 i++;
200 // Add adjacent free blocks to the current merged block.
201 while (i < free_list_.size() &&
202 free_list_[i].start == merged.start + merged.size) {
203 merged.size += free_list_[i].size;
204 i++;
205 }
206 if (merged.size > 0) {
207 allocation_list_.push_back(merged);
208 }
209 }
210 free_list_.clear();
211
212 for (current_allocation_block_index_ = 0;
213 current_allocation_block_index_ < allocation_list_.size();
214 current_allocation_block_index_++) {
215 if (requested <= allocation_list_[current_allocation_block_index_].size) {
216 return true; // Found a large enough allocation block.
217 }
218 }
219 current_allocation_block_index_ = 0;
220 // Code range is full or too fragmented.
221 return false;
222 }
223
224
225 Address CodeRange::AllocateRawMemory(const size_t requested_size,
226 const size_t commit_size,
227 size_t* allocated) {
228 // requested_size includes the header and two guard regions, while commit_size
229 // only includes the header.
230 DCHECK_LE(commit_size,
231 requested_size - 2 * MemoryAllocator::CodePageGuardSize());
232 FreeBlock current;
233   if (!ReserveBlock(requested_size, &current)) {
234 *allocated = 0;
235 return kNullAddress;
236 }
237 *allocated = current.size;
238 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
239 if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
240 &virtual_memory_, current.start, commit_size, *allocated)) {
241 *allocated = 0;
242     ReleaseBlock(&current);
243 return kNullAddress;
244 }
245 return current.start;
246 }
247
248 void CodeRange::FreeRawMemory(Address address, size_t length) {
249 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
250 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
251 free_list_.emplace_back(address, length);
252 virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
253 }
254
255 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
256 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
257 DCHECK(allocation_list_.empty() ||
258 current_allocation_block_index_ < allocation_list_.size());
259 if (allocation_list_.empty() ||
260 requested_size > allocation_list_[current_allocation_block_index_].size) {
261 // Find an allocation block large enough.
262 if (!GetNextAllocationBlock(requested_size)) return false;
263 }
264 // Commit the requested memory at the start of the current allocation block.
265 size_t aligned_requested = ::RoundUp(requested_size, MemoryChunk::kAlignment);
266 *block = allocation_list_[current_allocation_block_index_];
267 // Don't leave a small free block, useless for a large object or chunk.
268 if (aligned_requested < (block->size - Page::kPageSize)) {
269 block->size = aligned_requested;
270 }
271 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
272 allocation_list_[current_allocation_block_index_].start += block->size;
273 allocation_list_[current_allocation_block_index_].size -= block->size;
274 return true;
275 }
276
277
278 void CodeRange::ReleaseBlock(const FreeBlock* block) {
279 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
280 free_list_.push_back(*block);
281 }
282
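// Returns a placement hint for a new code range of |code_range_size| bytes.
// If a code range of the same size was freed earlier, its start address is
// handed out again; otherwise a random mmap address is returned.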
283 void* CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
284 base::LockGuard<base::Mutex> guard(&mutex_);
285 auto it = recently_freed_.find(code_range_size);
286 if (it == recently_freed_.end() || it->second.empty()) {
287 return GetRandomMmapAddr();
288 }
289 void* result = it->second.back();
290 it->second.pop_back();
291 return result;
292 }
293
294 void CodeRangeAddressHint::NotifyFreedCodeRange(void* code_range_start,
295 size_t code_range_size) {
296 base::LockGuard<base::Mutex> guard(&mutex_);
297 recently_freed_[code_range_size].push_back(code_range_start);
298 }
299
300 // -----------------------------------------------------------------------------
301 // MemoryAllocator
302 //
303
304 MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
305 size_t code_range_size)
306 : isolate_(isolate),
307 code_range_(nullptr),
308 capacity_(RoundUp(capacity, Page::kPageSize)),
309 size_(0),
310 size_executable_(0),
311 lowest_ever_allocated_(static_cast<Address>(-1ll)),
312 highest_ever_allocated_(kNullAddress),
313 unmapper_(isolate->heap(), this) {
314 code_range_ = new CodeRange(isolate_, code_range_size);
315 }
316
317
318 void MemoryAllocator::TearDown() {
319 unmapper()->TearDown();
320
321 // Check that spaces were torn down before MemoryAllocator.
322 DCHECK_EQ(size_, 0u);
323 // TODO(gc) this will be true again when we fix FreeMemory.
324 // DCHECK_EQ(0, size_executable_);
325 capacity_ = 0;
326
327 if (last_chunk_.IsReserved()) {
328 last_chunk_.Free();
329 }
330
331 delete code_range_;
332 code_range_ = nullptr;
333 }
334
335 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
336 public:
337   explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
338 : CancelableTask(isolate),
339 unmapper_(unmapper),
340 tracer_(isolate->heap()->tracer()) {}
341
342 private:
343   void RunInternal() override {
344 TRACE_BACKGROUND_GC(tracer_,
345 GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
346 unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
347 unmapper_->active_unmapping_tasks_--;
348 unmapper_->pending_unmapping_tasks_semaphore_.Signal();
349 if (FLAG_trace_unmapper) {
350 PrintIsolate(unmapper_->heap_->isolate(),
351 "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
352 }
353 }
354
355 Unmapper* const unmapper_;
356 GCTracer* const tracer_;
357 DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
358 };
359
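// Frees the queued chunks. When concurrent sweeping is enabled and the heap
// is not tearing down, the work is delegated to a background
// UnmapFreeMemoryTask (at most kMaxUnmapperTasks at a time); otherwise the
// queued chunks are freed synchronously on the current thread.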
360 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
361 if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
362 if (!MakeRoomForNewTasks()) {
363 // kMaxUnmapperTasks are already running. Avoid creating any more.
364 if (FLAG_trace_unmapper) {
365 PrintIsolate(heap_->isolate(),
366 "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
367 kMaxUnmapperTasks);
368 }
369 return;
370 }
371 auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
372 if (FLAG_trace_unmapper) {
373 PrintIsolate(heap_->isolate(),
374 "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
375 task->id());
376 }
377 DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
378 DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
379 DCHECK_GE(active_unmapping_tasks_, 0);
380 active_unmapping_tasks_++;
381 task_ids_[pending_unmapping_tasks_++] = task->id();
382 V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
383 } else {
384 PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
385 }
386 }
387
388 void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
389 for (int i = 0; i < pending_unmapping_tasks_; i++) {
390 if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
391 CancelableTaskManager::kTaskAborted) {
392 pending_unmapping_tasks_semaphore_.Wait();
393 }
394 }
395 pending_unmapping_tasks_ = 0;
396 active_unmapping_tasks_ = 0;
397
398 if (FLAG_trace_unmapper) {
399 PrintIsolate(
400 heap_->isolate(),
401 "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
402 }
403 }
404
405 void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
406 CancelAndWaitForPendingTasks();
407 // Free non-regular chunks because they cannot be re-used.
408 PerformFreeMemoryOnQueuedNonRegularChunks();
409 }
410
411 void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
412 CancelAndWaitForPendingTasks();
413 PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
414 }
415
416 bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
417 DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
418
419 if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
420 // All previous unmapping tasks have been run to completion.
421 // Finalize those tasks to make room for new ones.
422 CancelAndWaitForPendingTasks();
423 }
424 return pending_unmapping_tasks_ != kMaxUnmapperTasks;
425 }
426
427 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
428 MemoryChunk* chunk = nullptr;
429 while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
430 allocator_->PerformFreeMemory(chunk);
431 }
432 }
433
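// Frees all queued regular chunks; chunks flagged as POOLED are uncommitted
// and put back on the pooled queue for reuse. With FreeMode::kReleasePooled
// the pooled queue itself is released as well. Queued non-regular chunks are
// always freed at the end.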
434 template <MemoryAllocator::Unmapper::FreeMode mode>
435 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
436 MemoryChunk* chunk = nullptr;
437 if (FLAG_trace_unmapper) {
438 PrintIsolate(
439 heap_->isolate(),
440 "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
441 NumberOfChunks());
442 }
443 // Regular chunks.
444 while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
445 bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
446 allocator_->PerformFreeMemory(chunk);
447 if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
448 }
449 if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
450 // The previous loop uncommitted any pages marked as pooled and added them
451 // to the pooled list. In case of kReleasePooled we need to free them
452 // though.
453 while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
454 allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
455 }
456 }
457 PerformFreeMemoryOnQueuedNonRegularChunks();
458 }
459
460 void MemoryAllocator::Unmapper::TearDown() {
461 CHECK_EQ(0, pending_unmapping_tasks_);
462 PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
463 for (int i = 0; i < kNumberOfChunkQueues; i++) {
464 DCHECK(chunks_[i].empty());
465 }
466 }
467
468 int MemoryAllocator::Unmapper::NumberOfChunks() {
469 base::LockGuard<base::Mutex> guard(&mutex_);
470 size_t result = 0;
471 for (int i = 0; i < kNumberOfChunkQueues; i++) {
472 result += chunks_[i].size();
473 }
474 return static_cast<int>(result);
475 }
476
477 size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
478 base::LockGuard<base::Mutex> guard(&mutex_);
479
480 size_t sum = 0;
481   // kPooled chunks are already uncommitted. We only have to account for
482 // kRegular and kNonRegular chunks.
483 for (auto& chunk : chunks_[kRegular]) {
484 sum += chunk->size();
485 }
486 for (auto& chunk : chunks_[kNonRegular]) {
487 sum += chunk->size();
488 }
489 return sum;
490 }
491
492 bool MemoryAllocator::CommitMemory(Address base, size_t size) {
493 if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
494 return false;
495 }
496 UpdateAllocatedSpaceLimits(base, base + size);
497 return true;
498 }
499
500 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
501 Executability executable) {
502 // TODO(gc) make code_range part of memory allocator?
503 // Code which is part of the code-range does not have its own VirtualMemory.
504 DCHECK(code_range() == nullptr ||
505 !code_range()->contains(reservation->address()));
506 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
507 reservation->size() <= Page::kPageSize);
508
509 reservation->Free();
510 }
511
512
513 void MemoryAllocator::FreeMemory(Address base, size_t size,
514 Executability executable) {
515 // TODO(gc) make code_range part of memory allocator?
516 if (code_range() != nullptr && code_range()->contains(base)) {
517 DCHECK(executable == EXECUTABLE);
518 code_range()->FreeRawMemory(base, size);
519 } else {
520 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
521 CHECK(FreePages(reinterpret_cast<void*>(base), size));
522 }
523 }
524
525 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
526 void* hint,
527 VirtualMemory* controller) {
528 VirtualMemory reservation;
529 if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation)) {
530 return kNullAddress;
531 }
532
533 Address result = reservation.address();
534 size_ += reservation.size();
535 controller->TakeControl(&reservation);
536 return result;
537 }
538
539 Address MemoryAllocator::AllocateAlignedMemory(
540 size_t reserve_size, size_t commit_size, size_t alignment,
541 Executability executable, void* hint, VirtualMemory* controller) {
542 DCHECK(commit_size <= reserve_size);
543 VirtualMemory reservation;
544 Address base =
545 ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
546 if (base == kNullAddress) return kNullAddress;
547
548 if (executable == EXECUTABLE) {
549 if (!CommitExecutableMemory(&reservation, base, commit_size,
550 reserve_size)) {
551 base = kNullAddress;
552 }
553 } else {
554 if (reservation.SetPermissions(base, commit_size,
555 PageAllocator::kReadWrite)) {
556 UpdateAllocatedSpaceLimits(base, base + commit_size);
557 } else {
558 base = kNullAddress;
559 }
560 }
561
562 if (base == kNullAddress) {
563 // Failed to commit the body. Free the mapping and any partially committed
564 // regions inside it.
565 reservation.Free();
566 size_ -= reserve_size;
567 return kNullAddress;
568 }
569
570 controller->TakeControl(&reservation);
571 return base;
572 }
573
574 Heap* MemoryChunk::synchronized_heap() {
575 return reinterpret_cast<Heap*>(
576 base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
577 }
578
579 void MemoryChunk::InitializationMemoryFence() {
580 base::SeqCst_MemoryFence();
581 #ifdef THREAD_SANITIZER
582 // Since TSAN does not process memory fences, we use the following annotation
583 // to tell TSAN that there is no data race when emitting a
584 // InitializationMemoryFence. Note that the other thread still needs to
585 // perform MemoryChunk::synchronized_heap().
586 base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
587 reinterpret_cast<base::AtomicWord>(heap_));
588 #endif
589 }
590
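// Balances a preceding SetReadAndWritable() call. The code area stays
// writable as long as write_unprotect_counter_ > 0; once the counter drops
// back to zero the area is re-protected as read+execute.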
591 void MemoryChunk::SetReadAndExecutable() {
592 DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
593 DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
594 // Decrementing the write_unprotect_counter_ and changing the page
595 // protection mode has to be atomic.
596 base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
597 if (write_unprotect_counter_ == 0) {
598 // This is a corner case that may happen when we have a
599 // CodeSpaceMemoryModificationScope open and this page was newly
600 // added.
601 return;
602 }
603 write_unprotect_counter_--;
604 DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
605 if (write_unprotect_counter_ == 0) {
606 Address protect_start =
607 address() + MemoryAllocator::CodePageAreaStartOffset();
608 size_t page_size = MemoryAllocator::GetCommitPageSize();
609 DCHECK(IsAddressAligned(protect_start, page_size));
610 size_t protect_size = RoundUp(area_size(), page_size);
611 CHECK(SetPermissions(protect_start, protect_size,
612 PageAllocator::kReadExecute));
613 }
614 }
615
616 void MemoryChunk::SetReadAndWritable() {
617 DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
618 DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
619 // Incrementing the write_unprotect_counter_ and changing the page
620 // protection mode has to be atomic.
621 base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
622 write_unprotect_counter_++;
623 DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
624 if (write_unprotect_counter_ == 1) {
625 Address unprotect_start =
626 address() + MemoryAllocator::CodePageAreaStartOffset();
627 size_t page_size = MemoryAllocator::GetCommitPageSize();
628 DCHECK(IsAddressAligned(unprotect_start, page_size));
629 size_t unprotect_size = RoundUp(area_size(), page_size);
630 CHECK(SetPermissions(unprotect_start, unprotect_size,
631 PageAllocator::kReadWrite));
632 }
633 }
634
635 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
636 Address area_start, Address area_end,
637 Executability executable, Space* owner,
638 VirtualMemory* reservation) {
639 MemoryChunk* chunk = FromAddress(base);
640
641 DCHECK(base == chunk->address());
642
643 chunk->heap_ = heap;
644 chunk->size_ = size;
645 chunk->area_start_ = area_start;
646 chunk->area_end_ = area_end;
647 chunk->flags_ = Flags(NO_FLAGS);
648 chunk->set_owner(owner);
649 chunk->InitializeReservedMemory();
650 base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
651 base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
652 base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
653 nullptr);
654 base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
655 nullptr);
656 chunk->invalidated_slots_ = nullptr;
657 chunk->skip_list_ = nullptr;
658 chunk->progress_bar_ = 0;
659 chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
660 chunk->set_concurrent_sweeping_state(kSweepingDone);
661 chunk->page_protection_change_mutex_ = new base::Mutex();
662 chunk->write_unprotect_counter_ = 0;
663 chunk->mutex_ = new base::Mutex();
664 chunk->allocated_bytes_ = chunk->area_size();
665 chunk->wasted_memory_ = 0;
666 chunk->young_generation_bitmap_ = nullptr;
667 chunk->local_tracker_ = nullptr;
668
669 chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
670 0;
671 chunk->external_backing_store_bytes_
672 [ExternalBackingStoreType::kExternalString] = 0;
673
674 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
675 chunk->categories_[i] = nullptr;
676 }
677
678 if (owner->identity() == RO_SPACE) {
679 heap->incremental_marking()
680 ->non_atomic_marking_state()
681 ->bitmap(chunk)
682 ->MarkAllBits();
683 } else {
684 heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
685 chunk);
686 }
687
688 DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
689
690 if (executable == EXECUTABLE) {
691 chunk->SetFlag(IS_EXECUTABLE);
692 if (heap->write_protect_code_memory()) {
693 chunk->write_unprotect_counter_ =
694 heap->code_space_memory_modification_scope_depth();
695 } else {
696 size_t page_size = MemoryAllocator::GetCommitPageSize();
697 DCHECK(IsAddressAligned(area_start, page_size));
698 size_t area_size = RoundUp(area_end - area_start, page_size);
699 CHECK(SetPermissions(area_start, area_size,
700 PageAllocator::kReadWriteExecute));
701 }
702 }
703
704 if (reservation != nullptr) {
705 chunk->reservation_.TakeControl(reservation);
706 }
707
708 return chunk;
709 }
710
711 Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
712 Page* page = static_cast<Page*>(chunk);
713 DCHECK_GE(Page::kAllocatableMemory, page->area_size());
714 // Make sure that categories are initialized before freeing the area.
715 page->ResetAllocatedBytes();
716 page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
717 page->AllocateFreeListCategories();
718 page->InitializeFreeListCategories();
719 page->list_node().Initialize();
720 page->InitializationMemoryFence();
721 return page;
722 }
723
724 Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
725 DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
726 bool in_to_space = (id() != kFromSpace);
727 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
728 : MemoryChunk::IN_FROM_SPACE);
729 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
730 : MemoryChunk::IN_TO_SPACE));
731 Page* page = static_cast<Page*>(chunk);
732 page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
733 page->AllocateLocalTracker();
734 page->list_node().Initialize();
735 #ifdef ENABLE_MINOR_MC
736 if (FLAG_minor_mc) {
737 page->AllocateYoungGenerationBitmap();
738 heap()
739 ->minor_mark_compact_collector()
740 ->non_atomic_marking_state()
741 ->ClearLiveness(page);
742 }
743 #endif // ENABLE_MINOR_MC
744 page->InitializationMemoryFence();
745 return page;
746 }
747
748 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
749 Executability executable) {
750 if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
751 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
752 FATAL("Code page is too large.");
753 }
754
755 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
756
757 LargePage* page = static_cast<LargePage*>(chunk);
758 page->list_node().Initialize();
759 return page;
760 }
761
762 void Page::AllocateFreeListCategories() {
763 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
764 categories_[i] = new FreeListCategory(
765 reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
766 }
767 }
768
769 void Page::InitializeFreeListCategories() {
770 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
771 categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
772 }
773 }
774
775 void Page::ReleaseFreeListCategories() {
776 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
777 if (categories_[i] != nullptr) {
778 delete categories_[i];
779 categories_[i] = nullptr;
780 }
781 }
782 }
783
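// Promotes a new-space page in place: ownership is transferred to the old
// space, all page flags are cleared, and the page is re-initialized and
// linked into the old space as a regular old-space page.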
784 Page* Page::ConvertNewToOld(Page* old_page) {
785 DCHECK(old_page);
786 DCHECK(old_page->InNewSpace());
787 OldSpace* old_space = old_page->heap()->old_space();
788 old_page->set_owner(old_space);
789 old_page->SetFlags(0, static_cast<uintptr_t>(~0));
790 Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
791 old_space->AddPage(new_page);
792 return new_page;
793 }
794
795 size_t MemoryChunk::CommittedPhysicalMemory() {
796 if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
797 return size();
798 return high_water_mark_;
799 }
800
801 bool MemoryChunk::IsPagedSpace() const {
802 return owner()->identity() != LO_SPACE;
803 }
804
805 bool MemoryChunk::InOldSpace() const {
806 return owner()->identity() == OLD_SPACE;
807 }
808
809 bool MemoryChunk::InLargeObjectSpace() const {
810 return owner()->identity() == LO_SPACE;
811 }
812
813 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
814 size_t commit_area_size,
815 Executability executable,
816 Space* owner) {
817 DCHECK_LE(commit_area_size, reserve_area_size);
818
819 size_t chunk_size;
820 Heap* heap = isolate_->heap();
821 Address base = kNullAddress;
822 VirtualMemory reservation;
823 Address area_start = kNullAddress;
824 Address area_end = kNullAddress;
825 void* address_hint =
826 AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
827
828 //
829 // MemoryChunk layout:
830 //
831 // Executable
832 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
833 // | Header |
834 // +----------------------------+<- base + CodePageGuardStartOffset
835 // | Guard |
836 // +----------------------------+<- area_start_
837 // | Area |
838 // +----------------------------+<- area_end_ (area_start + commit_area_size)
839 // | Committed but not used |
840 // +----------------------------+<- aligned at OS page boundary
841 // | Reserved but not committed |
842 // +----------------------------+<- aligned at OS page boundary
843 // | Guard |
844 // +----------------------------+<- base + chunk_size
845 //
846 // Non-executable
847 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
848 // | Header |
849 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
850 // | Area |
851 // +----------------------------+<- area_end_ (area_start + commit_area_size)
852 // | Committed but not used |
853 // +----------------------------+<- aligned at OS page boundary
854 // | Reserved but not committed |
855 // +----------------------------+<- base + chunk_size
856 //
857
858 if (executable == EXECUTABLE) {
859 chunk_size = ::RoundUp(
860 CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
861 GetCommitPageSize());
862
863 // Size of header (not executable) plus area (executable).
864 size_t commit_size = ::RoundUp(
865 CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
866 // Allocate executable memory either from code range or from the OS.
867 #ifdef V8_TARGET_ARCH_MIPS64
868 // Use code range only for large object space on mips64 to keep address
869 // range within 256-MB memory region.
870 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
871 #else
872 if (code_range()->valid()) {
873 #endif
874 base =
875 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
876 DCHECK(IsAligned(base, MemoryChunk::kAlignment));
877 if (base == kNullAddress) return nullptr;
878 size_ += chunk_size;
879 // Update executable memory size.
880 size_executable_ += chunk_size;
881 } else {
882 base = AllocateAlignedMemory(chunk_size, commit_size,
883 MemoryChunk::kAlignment, executable,
884 address_hint, &reservation);
885 if (base == kNullAddress) return nullptr;
886 // Update executable memory size.
887 size_executable_ += reservation.size();
888 }
889
890 if (Heap::ShouldZapGarbage()) {
891 ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
892 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
893 }
894
895 area_start = base + CodePageAreaStartOffset();
896 area_end = area_start + commit_area_size;
897 } else {
898 chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
899 GetCommitPageSize());
900 size_t commit_size =
901 ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
902 GetCommitPageSize());
903 base =
904 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
905 executable, address_hint, &reservation);
906
907 if (base == kNullAddress) return nullptr;
908
909 if (Heap::ShouldZapGarbage()) {
910 ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
911 }
912
913 area_start = base + Page::kObjectStartOffset;
914 area_end = area_start + commit_area_size;
915 }
916
917 // Use chunk_size for statistics and callbacks because we assume that they
918 // treat reserved but not-yet committed memory regions of chunks as allocated.
919 isolate_->counters()->memory_allocated()->Increment(
920 static_cast<int>(chunk_size));
921
922 LOG(isolate_,
923 NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
924
925 // We cannot use the last chunk in the address space because we would
926 // overflow when comparing top and limit if this chunk is used for a
927 // linear allocation area.
928 if ((base + chunk_size) == 0u) {
929 CHECK(!last_chunk_.IsReserved());
930 last_chunk_.TakeControl(&reservation);
931 UncommitBlock(last_chunk_.address(), last_chunk_.size());
932 size_ -= chunk_size;
933 if (executable == EXECUTABLE) {
934 size_executable_ -= chunk_size;
935 }
936 CHECK(last_chunk_.IsReserved());
937 return AllocateChunk(reserve_area_size, commit_area_size, executable,
938 owner);
939 }
940
941 MemoryChunk* chunk =
942 MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
943 executable, owner, &reservation);
944
945 if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
946 return chunk;
947 }
948
949 void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
950 if (is_marking) {
951 SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
952 SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
953 SetFlag(MemoryChunk::INCREMENTAL_MARKING);
954 } else {
955 ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
956 SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
957 ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
958 }
959 }
960
961 void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
962 SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
963 if (is_marking) {
964 SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
965 SetFlag(MemoryChunk::INCREMENTAL_MARKING);
966 } else {
967 ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
968 ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
969 }
970 }
971
972 void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
973
974 void Page::AllocateLocalTracker() {
975 DCHECK_NULL(local_tracker_);
976 local_tracker_ = new LocalArrayBufferTracker(this);
977 }
978
979 bool Page::contains_array_buffers() {
980 return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
981 }
982
983 void Page::ResetFreeListStatistics() {
984 wasted_memory_ = 0;
985 }
986
987 size_t Page::AvailableInFreeList() {
988 size_t sum = 0;
989 ForAllFreeListCategories([&sum](FreeListCategory* category) {
990 sum += category->available();
991 });
992 return sum;
993 }
994
995 #ifdef DEBUG
996 namespace {
997 // Skips filler starting from the given filler until the end address.
998 // Returns the first address after the skipped fillers.
999 Address SkipFillers(HeapObject* filler, Address end) {
1000 Address addr = filler->address();
1001 while (addr < end) {
1002 filler = HeapObject::FromAddress(addr);
1003 CHECK(filler->IsFiller());
1004 addr = filler->address() + filler->Size();
1005 }
1006 return addr;
1007 }
1008 } // anonymous namespace
1009 #endif // DEBUG
1010
1011 size_t Page::ShrinkToHighWaterMark() {
1012 // Shrinking only makes sense outside of the CodeRange, where we don't care
1013 // about address space fragmentation.
1014 VirtualMemory* reservation = reserved_memory();
1015 if (!reservation->IsReserved()) return 0;
1016
1017 // Shrink pages to high water mark. The water mark points either to a filler
1018 // or the area_end.
1019 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
1020 if (filler->address() == area_end()) return 0;
1021 CHECK(filler->IsFiller());
1022 // Ensure that no objects were allocated in [filler, area_end) region.
1023 DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
1024 // Ensure that no objects will be allocated on this page.
1025 DCHECK_EQ(0u, AvailableInFreeList());
1026
1027 size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
1028 MemoryAllocator::GetCommitPageSize());
1029 if (unused > 0) {
1030 DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
1031 if (FLAG_trace_gc_verbose) {
1032 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
1033 reinterpret_cast<void*>(this),
1034 reinterpret_cast<void*>(area_end()),
1035 reinterpret_cast<void*>(area_end() - unused));
1036 }
1037 heap()->CreateFillerObjectAt(
1038 filler->address(),
1039 static_cast<int>(area_end() - filler->address() - unused),
1040 ClearRecordedSlots::kNo);
1041 heap()->memory_allocator()->PartialFreeMemory(
1042 this, address() + size() - unused, unused, area_end() - unused);
1043 if (filler->address() != area_end()) {
1044 CHECK(filler->IsFiller());
1045 CHECK_EQ(filler->address() + filler->Size(), area_end());
1046 }
1047 }
1048 return unused;
1049 }
1050
1051 void Page::CreateBlackArea(Address start, Address end) {
1052 DCHECK(heap()->incremental_marking()->black_allocation());
1053 DCHECK_EQ(Page::FromAddress(start), this);
1054 DCHECK_NE(start, end);
1055 DCHECK_EQ(Page::FromAddress(end - 1), this);
1056 IncrementalMarking::MarkingState* marking_state =
1057 heap()->incremental_marking()->marking_state();
1058 marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
1059 AddressToMarkbitIndex(end));
1060 marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
1061 }
1062
1063 void Page::DestroyBlackArea(Address start, Address end) {
1064 DCHECK(heap()->incremental_marking()->black_allocation());
1065 DCHECK_EQ(Page::FromAddress(start), this);
1066 DCHECK_NE(start, end);
1067 DCHECK_EQ(Page::FromAddress(end - 1), this);
1068 IncrementalMarking::MarkingState* marking_state =
1069 heap()->incremental_marking()->marking_state();
1070 marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
1071 AddressToMarkbitIndex(end));
1072 marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
1073 }
1074
1075 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1076 size_t bytes_to_free,
1077 Address new_area_end) {
1078 VirtualMemory* reservation = chunk->reserved_memory();
1079 DCHECK(reservation->IsReserved());
1080 chunk->size_ -= bytes_to_free;
1081 chunk->area_end_ = new_area_end;
1082 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1083 // Add guard page at the end.
1084 size_t page_size = GetCommitPageSize();
1085 DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
1086 DCHECK_EQ(chunk->address() + chunk->size(),
1087 chunk->area_end() + CodePageGuardSize());
1088 reservation->SetPermissions(chunk->area_end_, page_size,
1089 PageAllocator::kNoAccess);
1090 }
1091 // On e.g. Windows, a reservation may be larger than a page and releasing
1092 // partially starting at |start_free| will also release the potentially
1093 // unused part behind the current page.
1094 const size_t released_bytes = reservation->Release(start_free);
1095 DCHECK_GE(size_, released_bytes);
1096 size_ -= released_bytes;
1097 isolate_->counters()->memory_allocated()->Decrement(
1098 static_cast<int>(released_bytes));
1099 }
1100
1101 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
1102 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1103 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
1104
1105 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
1106 chunk->IsEvacuationCandidate());
1107
1108 VirtualMemory* reservation = chunk->reserved_memory();
1109 const size_t size =
1110 reservation->IsReserved() ? reservation->size() : chunk->size();
1111 DCHECK_GE(size_, static_cast<size_t>(size));
1112 size_ -= size;
1113 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1114 if (chunk->executable() == EXECUTABLE) {
1115 DCHECK_GE(size_executable_, size);
1116 size_executable_ -= size;
1117 }
1118
1119 chunk->SetFlag(MemoryChunk::PRE_FREED);
1120
1121 if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
1122 }
1123
1124
1125 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
1126 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1127 chunk->ReleaseAllocatedMemory();
1128
1129 VirtualMemory* reservation = chunk->reserved_memory();
1130 if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
1131 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
1132 } else {
1133 if (reservation->IsReserved()) {
1134 FreeMemory(reservation, chunk->executable());
1135 } else {
1136 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
1137 }
1138 }
1139 }
1140
1141 template <MemoryAllocator::FreeMode mode>
1142 void MemoryAllocator::Free(MemoryChunk* chunk) {
1143 switch (mode) {
1144 case kFull:
1145 PreFreeMemory(chunk);
1146 PerformFreeMemory(chunk);
1147 break;
1148 case kAlreadyPooled:
1149 // Pooled pages cannot be touched anymore as their memory is uncommitted.
1150 FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
1151 Executability::NOT_EXECUTABLE);
1152 break;
1153 case kPooledAndQueue:
1154 DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
1155 DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
1156 chunk->SetFlag(MemoryChunk::POOLED);
1157 V8_FALLTHROUGH;
1158 case kPreFreeAndQueue:
1159 PreFreeMemory(chunk);
1160 // The chunks added to this queue will be freed by a concurrent thread.
1161 unmapper()->AddMemoryChunkSafe(chunk);
1162 break;
1163 }
1164 }
1165
1166 template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
1167
1168 template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
1169 MemoryChunk* chunk);
1170
1171 template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
1172 MemoryChunk* chunk);
1173
1174 template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
1175 MemoryChunk* chunk);
1176
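// Allocates a page for |owner|. In kPooled mode a previously pooled chunk is
// reused and recommitted if one is available; otherwise (and in kRegular
// mode) a fresh chunk is allocated via AllocateChunk.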
1177 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
1178 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
1179 Executability executable) {
1180 MemoryChunk* chunk = nullptr;
1181 if (alloc_mode == kPooled) {
1182 DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
1183 DCHECK_EQ(executable, NOT_EXECUTABLE);
1184 chunk = AllocatePagePooled(owner);
1185 }
1186 if (chunk == nullptr) {
1187 chunk = AllocateChunk(size, size, executable, owner);
1188 }
1189 if (chunk == nullptr) return nullptr;
1190 return owner->InitializePage(chunk, executable);
1191 }
1192
1193 template Page*
1194 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1195 size_t size, PagedSpace* owner, Executability executable);
1196 template Page*
1197 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1198 size_t size, SemiSpace* owner, Executability executable);
1199 template Page*
1200 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1201 size_t size, SemiSpace* owner, Executability executable);
1202
1203 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
1204 LargeObjectSpace* owner,
1205 Executability executable) {
1206 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
1207 if (chunk == nullptr) return nullptr;
1208 return LargePage::Initialize(isolate_->heap(), chunk, executable);
1209 }
1210
1211 template <typename SpaceType>
1212 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
1213 MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
1214 if (chunk == nullptr) return nullptr;
1215 const int size = MemoryChunk::kPageSize;
1216 const Address start = reinterpret_cast<Address>(chunk);
1217 const Address area_start = start + MemoryChunk::kObjectStartOffset;
1218 const Address area_end = start + size;
1219 if (!CommitBlock(start, size)) {
1220 return nullptr;
1221 }
1222 VirtualMemory reservation(start, size);
1223 MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
1224 NOT_EXECUTABLE, owner, &reservation);
1225 size_ += size;
1226 return chunk;
1227 }
1228
1229 bool MemoryAllocator::CommitBlock(Address start, size_t size) {
1230 if (!CommitMemory(start, size)) return false;
1231
1232 if (Heap::ShouldZapGarbage()) {
1233 ZapBlock(start, size, kZapValue);
1234 }
1235
1236 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
1237 return true;
1238 }
1239
1240
1241 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
1242 if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
1243 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1244 return true;
1245 }
1246
1247 void MemoryAllocator::ZapBlock(Address start, size_t size,
1248 uintptr_t zap_value) {
1249 DCHECK_EQ(start % kPointerSize, 0);
1250 DCHECK_EQ(size % kPointerSize, 0);
1251 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
1252 Memory<Address>(start + s) = static_cast<Address>(zap_value);
1253 }
1254 }
1255
1256 size_t MemoryAllocator::CodePageGuardStartOffset() {
1257 // We are guarding code pages: the first OS page after the header
1258 // will be protected as non-writable.
1259 return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
1260 }
1261
1262 size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
1263
1264 size_t MemoryAllocator::CodePageAreaStartOffset() {
1265 // We are guarding code pages: the first OS page after the header
1266 // will be protected as non-writable.
1267 return CodePageGuardStartOffset() + CodePageGuardSize();
1268 }
1269
1270 size_t MemoryAllocator::CodePageAreaEndOffset() {
1271 // We are guarding code pages: the last OS page will be protected as
1272 // non-writable.
1273 return Page::kPageSize - static_cast<int>(GetCommitPageSize());
1274 }
1275
1276 intptr_t MemoryAllocator::GetCommitPageSize() {
1277 if (FLAG_v8_os_page_size != 0) {
1278 DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
1279 return FLAG_v8_os_page_size * KB;
1280 } else {
1281 return CommitPageSize();
1282 }
1283 }
1284
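// Commits an executable chunk in four steps: the non-executable header is
// made read+write, the pre-code guard page is set to no-access, the code
// body is committed read+write, and the trailing guard page is set to
// no-access. If any step fails, the earlier permission changes are rolled
// back and false is returned.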
1285 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
1286 size_t commit_size,
1287 size_t reserved_size) {
1288 const size_t page_size = GetCommitPageSize();
1289 // All addresses and sizes must be aligned to the commit page size.
1290 DCHECK(IsAddressAligned(start, page_size));
1291 DCHECK_EQ(0, commit_size % page_size);
1292 DCHECK_EQ(0, reserved_size % page_size);
1293 const size_t guard_size = CodePageGuardSize();
1294 const size_t pre_guard_offset = CodePageGuardStartOffset();
1295 const size_t code_area_offset = CodePageAreaStartOffset();
1296 // reserved_size includes two guard regions, commit_size does not.
1297 DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
1298 const Address pre_guard_page = start + pre_guard_offset;
1299 const Address code_area = start + code_area_offset;
1300 const Address post_guard_page = start + reserved_size - guard_size;
1301 // Commit the non-executable header, from start to pre-code guard page.
1302 if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
1303 // Create the pre-code guard page, following the header.
1304 if (vm->SetPermissions(pre_guard_page, page_size,
1305 PageAllocator::kNoAccess)) {
1306 // Commit the executable code body.
1307 if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
1308 PageAllocator::kReadWrite)) {
1309 // Create the post-code guard page.
1310 if (vm->SetPermissions(post_guard_page, page_size,
1311 PageAllocator::kNoAccess)) {
1312 UpdateAllocatedSpaceLimits(start, code_area + commit_size);
1313 return true;
1314 }
1315 vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
1316 }
1317 }
1318 vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
1319 }
1320 return false;
1321 }
1322
1323
1324 // -----------------------------------------------------------------------------
1325 // MemoryChunk implementation
1326
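// Releases all side data owned by the chunk: skip list, mutexes, slot sets,
// typed slot sets, invalidated slots, the local array buffer tracker, the
// young generation bitmap and, for paged-space pages, the free list
// categories.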
1327 void MemoryChunk::ReleaseAllocatedMemory() {
1328 if (skip_list_ != nullptr) {
1329 delete skip_list_;
1330 skip_list_ = nullptr;
1331 }
1332 if (mutex_ != nullptr) {
1333 delete mutex_;
1334 mutex_ = nullptr;
1335 }
1336 if (page_protection_change_mutex_ != nullptr) {
1337 delete page_protection_change_mutex_;
1338 page_protection_change_mutex_ = nullptr;
1339 }
1340 ReleaseSlotSet<OLD_TO_NEW>();
1341 ReleaseSlotSet<OLD_TO_OLD>();
1342 ReleaseTypedSlotSet<OLD_TO_NEW>();
1343 ReleaseTypedSlotSet<OLD_TO_OLD>();
1344 ReleaseInvalidatedSlots();
1345 if (local_tracker_ != nullptr) ReleaseLocalTracker();
1346 if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
1347
1348 if (IsPagedSpace()) {
1349 Page* page = static_cast<Page*>(this);
1350 page->ReleaseFreeListCategories();
1351 }
1352 }
1353
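// Allocates one SlotSet per page covered by a chunk of |size| bytes (a chunk
// may span several pages) and points each set at its page start.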
1354 static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
1355 size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1356 DCHECK_LT(0, pages);
1357 SlotSet* slot_set = new SlotSet[pages];
1358 for (size_t i = 0; i < pages; i++) {
1359 slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1360 }
1361 return slot_set;
1362 }
1363
1364 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
1365 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
1366
1367 template <RememberedSetType type>
1368 SlotSet* MemoryChunk::AllocateSlotSet() {
1369 SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
1370 SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
1371 &slot_set_[type], nullptr, slot_set);
1372 if (old_slot_set != nullptr) {
1373 delete[] slot_set;
1374 slot_set = old_slot_set;
1375 }
1376 DCHECK(slot_set);
1377 return slot_set;
1378 }
1379
1380 template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
1381 template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
1382
1383 template <RememberedSetType type>
1384 void MemoryChunk::ReleaseSlotSet() {
1385 SlotSet* slot_set = slot_set_[type];
1386 if (slot_set) {
1387 slot_set_[type] = nullptr;
1388 delete[] slot_set;
1389 }
1390 }
1391
1392 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
1393 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
1394
1395 template <RememberedSetType type>
1396 TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
1397 TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
1398 TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
1399 &typed_slot_set_[type], nullptr, typed_slot_set);
1400 if (old_value != nullptr) {
1401 delete typed_slot_set;
1402 typed_slot_set = old_value;
1403 }
1404 DCHECK(typed_slot_set);
1405 return typed_slot_set;
1406 }
1407
1408 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
1409 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
1410
1411 template <RememberedSetType type>
1412 void MemoryChunk::ReleaseTypedSlotSet() {
1413 TypedSlotSet* typed_slot_set = typed_slot_set_[type];
1414 if (typed_slot_set) {
1415 typed_slot_set_[type] = nullptr;
1416 delete typed_slot_set;
1417 }
1418 }
1419
1420 InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
1421 DCHECK_NULL(invalidated_slots_);
1422 invalidated_slots_ = new InvalidatedSlots();
1423 return invalidated_slots_;
1424 }
1425
1426 void MemoryChunk::ReleaseInvalidatedSlots() {
1427 if (invalidated_slots_) {
1428 delete invalidated_slots_;
1429 invalidated_slots_ = nullptr;
1430 }
1431 }
1432
1433 void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
1434 int size) {
1435 if (!ShouldSkipEvacuationSlotRecording()) {
1436 if (invalidated_slots() == nullptr) {
1437 AllocateInvalidatedSlots();
1438 }
1439 int old_size = (*invalidated_slots())[object];
1440 (*invalidated_slots())[object] = std::max(old_size, size);
1441 }
1442 }
1443
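// Transfers the invalidated-slots entry of |old_start| to |new_start| when
// the object now begins at a higher address on the same chunk, reducing the
// recorded size by the distance the object start moved.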
1444 void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
1445 HeapObject* new_start) {
1446 DCHECK_LT(old_start, new_start);
1447 DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
1448 MemoryChunk::FromHeapObject(new_start));
1449 if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
1450 auto it = invalidated_slots()->find(old_start);
1451 if (it != invalidated_slots()->end()) {
1452 int old_size = it->second;
1453 int delta = static_cast<int>(new_start->address() - old_start->address());
1454 invalidated_slots()->erase(it);
1455 (*invalidated_slots())[new_start] = old_size - delta;
1456 }
1457 }
1458 }
1459
1460 void MemoryChunk::ReleaseLocalTracker() {
1461 DCHECK_NOT_NULL(local_tracker_);
1462 delete local_tracker_;
1463 local_tracker_ = nullptr;
1464 }
1465
1466 void MemoryChunk::AllocateYoungGenerationBitmap() {
1467 DCHECK_NULL(young_generation_bitmap_);
1468 young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
1469 }
1470
1471 void MemoryChunk::ReleaseYoungGenerationBitmap() {
1472 DCHECK_NOT_NULL(young_generation_bitmap_);
1473 free(young_generation_bitmap_);
1474 young_generation_bitmap_ = nullptr;
1475 }
1476
1477 void MemoryChunk::IncrementExternalBackingStoreBytes(
1478 ExternalBackingStoreType type, size_t amount) {
1479 external_backing_store_bytes_[type] += amount;
1480 owner()->IncrementExternalBackingStoreBytes(type, amount);
1481 }
1482
1483 void MemoryChunk::DecrementExternalBackingStoreBytes(
1484 ExternalBackingStoreType type, size_t amount) {
1485 DCHECK_GE(external_backing_store_bytes_[type], amount);
1486 external_backing_store_bytes_[type] -= amount;
1487 owner()->DecrementExternalBackingStoreBytes(type, amount);
1488 }
1489
1490 // -----------------------------------------------------------------------------
1491 // PagedSpace implementation
1492
1493 void Space::AddAllocationObserver(AllocationObserver* observer) {
1494 allocation_observers_.push_back(observer);
1495 StartNextInlineAllocationStep();
1496 }
1497
1498 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
1499 auto it = std::find(allocation_observers_.begin(),
1500 allocation_observers_.end(), observer);
1501 DCHECK(allocation_observers_.end() != it);
1502 allocation_observers_.erase(it);
1503 StartNextInlineAllocationStep();
1504 }
1505
1506 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
1507
1508 void Space::ResumeAllocationObservers() {
1509 allocation_observers_paused_ = false;
1510 }
1511
1512 void Space::AllocationStep(int bytes_since_last, Address soon_object,
1513 int size) {
1514 if (!AllocationObserversActive()) {
1515 return;
1516 }
1517
1518 DCHECK(!heap()->allocation_step_in_progress());
1519 heap()->set_allocation_step_in_progress(true);
1520 heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
1521 for (AllocationObserver* observer : allocation_observers_) {
1522 observer->AllocationStep(bytes_since_last, soon_object, size);
1523 }
1524 heap()->set_allocation_step_in_progress(false);
1525 }
1526
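// The next step is the smallest remaining budget over all installed
// observers, or 0 when there are none.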
1527 intptr_t Space::GetNextInlineAllocationStepSize() {
1528 intptr_t next_step = 0;
1529 for (AllocationObserver* observer : allocation_observers_) {
1530 next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
1531 : observer->bytes_to_next_step();
1532 }
1533 DCHECK(allocation_observers_.size() == 0 || next_step > 0);
1534 return next_step;
1535 }
1536
1537 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1538 Executability executable)
1539 : SpaceWithLinearArea(heap, space), executable_(executable) {
1540 area_size_ = MemoryAllocator::PageAreaSize(space);
1541 accounting_stats_.Clear();
1542 }
1543
1544 void PagedSpace::TearDown() {
1545 while (!memory_chunk_list_.Empty()) {
1546 MemoryChunk* chunk = memory_chunk_list_.front();
1547 memory_chunk_list_.Remove(chunk);
1548 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
1549 }
1550 accounting_stats_.Clear();
1551 }
1552
1553 void PagedSpace::RefillFreeList() {
1554 // Any PagedSpace might invoke RefillFreeList; filter out everything but
1555 // the old generation spaces.
1556 if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1557 identity() != MAP_SPACE && identity() != RO_SPACE) {
1558 return;
1559 }
1560 MarkCompactCollector* collector = heap()->mark_compact_collector();
1561 size_t added = 0;
1562 {
1563 Page* p = nullptr;
1564 while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
1565 // Pages can only change ownership during compaction. This is safe
1566 // because no other action competes for the page links while compaction
1567 // is in progress.
1568 if (is_local()) {
1569 DCHECK_NE(this, p->owner());
1570 PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
1571 base::LockGuard<base::Mutex> guard(owner->mutex());
1572 owner->RefineAllocatedBytesAfterSweeping(p);
1573 owner->RemovePage(p);
1574 added += AddPage(p);
1575 } else {
1576 base::LockGuard<base::Mutex> guard(mutex());
1577 DCHECK_EQ(this, p->owner());
1578 RefineAllocatedBytesAfterSweeping(p);
1579 added += RelinkFreeListCategories(p);
1580 }
1581 added += p->wasted_memory();
1582 if (is_local() && (added > kCompactionMemoryWanted)) break;
1583 }
1584 }
1585 }
1586
1587 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1588 base::LockGuard<base::Mutex> guard(mutex());
1589
1590 DCHECK(identity() == other->identity());
1591 // Unmerged fields:
1592 // area_size_
1593 other->FreeLinearAllocationArea();
1594
1595 // The linear allocation area of {other} should be destroyed now.
1596 DCHECK_EQ(kNullAddress, other->top());
1597 DCHECK_EQ(kNullAddress, other->limit());
1598
1599 // Move over pages.
1600 for (auto it = other->begin(); it != other->end();) {
1601 Page* p = *(it++);
1602 // Relinking requires the category to be unlinked.
1603 other->RemovePage(p);
1604 AddPage(p);
1605 DCHECK_EQ(p->AvailableInFreeList(),
1606 p->AvailableInFreeListFromAllocatedBytes());
1607 }
1608 DCHECK_EQ(0u, other->Size());
1609 DCHECK_EQ(0u, other->Capacity());
1610 }
1611
1612
1613 size_t PagedSpace::CommittedPhysicalMemory() {
1614 if (!base::OS::HasLazyCommits()) return CommittedMemory();
1615 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1616 size_t size = 0;
1617 for (Page* page : *this) {
1618 size += page->CommittedPhysicalMemory();
1619 }
1620 return size;
1621 }
1622
1623 bool PagedSpace::ContainsSlow(Address addr) {
1624 Page* p = Page::FromAddress(addr);
1625 for (Page* page : *this) {
1626 if (page == p) return true;
1627 }
1628 return false;
1629 }
1630
1631 void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
1632 CHECK(page->SweepingDone());
1633 auto marking_state =
1634 heap()->incremental_marking()->non_atomic_marking_state();
1635 // The live bytes counter of the page was accounted for in the space's
1636 // allocated bytes counter. After sweeping, allocated_bytes() contains
1637 // the accurate live byte count for the page.
1638 size_t old_counter = marking_state->live_bytes(page);
1639 size_t new_counter = page->allocated_bytes();
1640 DCHECK_GE(old_counter, new_counter);
1641 if (old_counter > new_counter) {
1642 DecreaseAllocatedBytes(old_counter - new_counter, page);
1643 // Give the heap a chance to adjust counters in response to the
1644 // more precise and smaller old generation size.
1645 heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
1646 }
1647 marking_state->SetLiveBytes(page, 0);
1648 }
1649
1650 Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
1651 base::LockGuard<base::Mutex> guard(mutex());
1652 // Check for pages that still contain free list entries. Bail out for smaller
1653 // categories.
1654 const int minimum_category =
1655 static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
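// Prefer taking a page from the largest category and fall back to smaller
// ones, but never below the category that can still satisfy size_in_bytes.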
1656 Page* page = free_list()->GetPageForCategoryType(kHuge);
1657 if (!page && static_cast<int>(kLarge) >= minimum_category)
1658 page = free_list()->GetPageForCategoryType(kLarge);
1659 if (!page && static_cast<int>(kMedium) >= minimum_category)
1660 page = free_list()->GetPageForCategoryType(kMedium);
1661 if (!page && static_cast<int>(kSmall) >= minimum_category)
1662 page = free_list()->GetPageForCategoryType(kSmall);
1663 if (!page && static_cast<int>(kTiny) >= minimum_category)
1664 page = free_list()->GetPageForCategoryType(kTiny);
1665 if (!page && static_cast<int>(kTiniest) >= minimum_category)
1666 page = free_list()->GetPageForCategoryType(kTiniest);
1667 if (!page) return nullptr;
1668 RemovePage(page);
1669 return page;
1670 }
1671
1672 size_t PagedSpace::AddPage(Page* page) {
1673 CHECK(page->SweepingDone());
1674 page->set_owner(this);
1675 memory_chunk_list_.PushBack(page);
1676 AccountCommitted(page->size());
1677 IncreaseCapacity(page->area_size());
1678 IncreaseAllocatedBytes(page->allocated_bytes(), page);
1679 for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
1680 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1681 IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
1682 }
1683 return RelinkFreeListCategories(page);
1684 }
1685
1686 void PagedSpace::RemovePage(Page* page) {
1687 CHECK(page->SweepingDone());
1688 memory_chunk_list_.Remove(page);
1689 UnlinkFreeListCategories(page);
1690 DecreaseAllocatedBytes(page->allocated_bytes(), page);
1691 DecreaseCapacity(page->area_size());
1692 AccountUncommitted(page->size());
1693 for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
1694 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1695 DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
1696 }
1697 }
1698
1699 size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
1700 size_t unused = page->ShrinkToHighWaterMark();
1701 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1702 AccountUncommitted(unused);
1703 return unused;
1704 }
1705
1706 void PagedSpace::ResetFreeList() {
1707 for (Page* page : *this) {
1708 free_list_.EvictFreeListItems(page);
1709 }
1710 DCHECK(free_list_.IsEmpty());
1711 }
1712
1713 void PagedSpace::ShrinkImmortalImmovablePages() {
1714 DCHECK(!heap()->deserialization_complete());
1715 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1716 FreeLinearAllocationArea();
1717 ResetFreeList();
1718 for (Page* page : *this) {
1719 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1720 ShrinkPageToHighWaterMark(page);
1721 }
1722 }
1723
1724 bool PagedSpace::Expand() {
1725 // Always lock against the main space as we can only adjust capacity and
1726 // pages concurrently for the main paged space.
1727 base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
1728
1729 const int size = AreaSize();
1730
1731 if (!heap()->CanExpandOldGeneration(size)) return false;
1732
1733 Page* page =
1734 heap()->memory_allocator()->AllocatePage(size, this, executable());
1735 if (page == nullptr) return false;
1736 // Pages created during bootstrapping may contain immortal immovable objects.
1737 if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
1738 AddPage(page);
1739 Free(page->area_start(), page->area_size(),
1740 SpaceAccountingMode::kSpaceAccounted);
1741 return true;
1742 }
1743
1744
1745 int PagedSpace::CountTotalPages() {
1746 int count = 0;
1747 for (Page* page : *this) {
1748 count++;
1749 USE(page);
1750 }
1751 return count;
1752 }
1753
1754
1755 void PagedSpace::ResetFreeListStatistics() {
1756 for (Page* page : *this) {
1757 page->ResetFreeListStatistics();
1758 }
1759 }
1760
1761 void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
1762 SetTopAndLimit(top, limit);
1763 if (top != kNullAddress && top != limit &&
1764 heap()->incremental_marking()->black_allocation()) {
1765 Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
1766 }
1767 }
1768
1769 void PagedSpace::DecreaseLimit(Address new_limit) {
1770 Address old_limit = limit();
1771 DCHECK_LE(top(), new_limit);
1772 DCHECK_GE(old_limit, new_limit);
1773 if (new_limit != old_limit) {
1774 SetTopAndLimit(top(), new_limit);
1775 Free(new_limit, old_limit - new_limit,
1776 SpaceAccountingMode::kSpaceAccounted);
1777 if (heap()->incremental_marking()->black_allocation()) {
1778 Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
1779 old_limit);
1780 }
1781 }
1782 }
1783
1784 Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
1785 size_t min_size) {
1786 DCHECK_GE(end - start, min_size);
1787
1788 if (heap()->inline_allocation_disabled()) {
1789 // Fit the requested area exactly.
1790 return start + min_size;
1791 } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
1792 // Generated code may allocate inline from the linear allocation area.
1793 // To make sure we can observe these allocations, we use a lower limit.
1794 size_t step = GetNextInlineAllocationStepSize();
1795
1796 // TODO(ofrobots): there is a subtle difference between old space and new
1797 // space here. Any way to avoid it? `step - 1` makes more sense, as we would
1798 // like to sample the object that straddles the `start + step` boundary.
1799 // Rounding down further would introduce a small statistical error in
1800 // sampling. However, PagedSpace presently requires the limit to be aligned.
1801 size_t rounded_step;
1802 if (identity() == NEW_SPACE) {
1803 DCHECK_GE(step, 1);
1804 rounded_step = step - 1;
1805 } else {
1806 rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
1807 }
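// Place the limit roughly one observer step beyond the requested minimum,
// clamped to the end of the area, so the slow allocation path (which
// notifies the observers) is taken once the step budget is used up.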
1808 return Min(static_cast<Address>(start + min_size + rounded_step), end);
1809 } else {
1810 // The entire node can be used as the linear allocation area.
1811 return end;
1812 }
1813 }
1814
1815 void PagedSpace::MarkLinearAllocationAreaBlack() {
1816 DCHECK(heap()->incremental_marking()->black_allocation());
1817 Address current_top = top();
1818 Address current_limit = limit();
1819 if (current_top != kNullAddress && current_top != current_limit) {
1820 Page::FromAllocationAreaAddress(current_top)
1821 ->CreateBlackArea(current_top, current_limit);
1822 }
1823 }
1824
1825 void PagedSpace::UnmarkLinearAllocationArea() {
1826 Address current_top = top();
1827 Address current_limit = limit();
1828 if (current_top != kNullAddress && current_top != current_limit) {
1829 Page::FromAllocationAreaAddress(current_top)
1830 ->DestroyBlackArea(current_top, current_limit);
1831 }
1832 }
1833
1834 void PagedSpace::FreeLinearAllocationArea() {
1835 // Mark the old linear allocation area with a free space map so it can be
1836 // skipped when scanning the heap.
1837 Address current_top = top();
1838 Address current_limit = limit();
1839 if (current_top == kNullAddress) {
1840 DCHECK_EQ(kNullAddress, current_limit);
1841 return;
1842 }
1843
1844 if (heap()->incremental_marking()->black_allocation()) {
1845 Page* page = Page::FromAllocationAreaAddress(current_top);
1846
1847 // Clear the bits in the unused black area.
1848 if (current_top != current_limit) {
1849 IncrementalMarking::MarkingState* marking_state =
1850 heap()->incremental_marking()->marking_state();
1851 marking_state->bitmap(page)->ClearRange(
1852 page->AddressToMarkbitIndex(current_top),
1853 page->AddressToMarkbitIndex(current_limit));
1854 marking_state->IncrementLiveBytes(
1855 page, -static_cast<int>(current_limit - current_top));
1856 }
1857 }
1858
1859 InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
1860 SetTopAndLimit(kNullAddress, kNullAddress);
1861 DCHECK_GE(current_limit, current_top);
1862
1863 // The code page of the linear allocation area needs to be unprotected
1864 // because we are going to write a filler into that memory area below.
1865 if (identity() == CODE_SPACE) {
1866 heap()->UnprotectAndRegisterMemoryChunk(
1867 MemoryChunk::FromAddress(current_top));
1868 }
1869 Free(current_top, current_limit - current_top,
1870 SpaceAccountingMode::kSpaceAccounted);
1871 }
1872
1873 void PagedSpace::ReleasePage(Page* page) {
1874 DCHECK_EQ(
1875 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
1876 page));
1877 DCHECK_EQ(page->owner(), this);
1878
1879 free_list_.EvictFreeListItems(page);
1880 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1881
1882 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1883 DCHECK(!top_on_previous_step_);
1884 allocation_info_.Reset(kNullAddress, kNullAddress);
1885 }
1886
1887 AccountUncommitted(page->size());
1888 accounting_stats_.DecreaseCapacity(page->area_size());
1889 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1890 }
1891
1892 void PagedSpace::SetReadAndExecutable() {
1893 DCHECK(identity() == CODE_SPACE);
1894 for (Page* page : *this) {
1895 CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1896 page->SetReadAndExecutable();
1897 }
1898 }
1899
1900 void PagedSpace::SetReadAndWritable() {
1901 DCHECK(identity() == CODE_SPACE);
1902 for (Page* page : *this) {
1903 CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1904 page->SetReadAndWritable();
1905 }
1906 }
1907
1908 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1909 return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1910 }
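// Illustrative sketch (not part of this file's logic): callers typically
// drain the returned iterator with
//   for (HeapObject* obj = it->Next(); obj != nullptr; obj = it->Next()) ...
// which is the same pattern the Verify() routines below use per page.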
1911
1912 bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
1913 DCHECK(IsAligned(size_in_bytes, kPointerSize));
1914 DCHECK_LE(top(), limit());
1915 #ifdef DEBUG
1916 if (top() != limit()) {
1917 DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
1918 }
1919 #endif
1920 // Don't allocate from the free list if there is linear space available.
1921 DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
1922
1923 // Mark the old linear allocation area with a free space map so it can be
1924 // skipped when scanning the heap. This also puts it back in the free list
1925 // if it is big enough.
1926 FreeLinearAllocationArea();
1927
1928 if (!is_local()) {
1929 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
1930 heap()->GCFlagsForIncrementalMarking(),
1931 kGCCallbackScheduleIdleGarbageCollection);
1932 }
1933
1934 size_t new_node_size = 0;
1935 FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
1936 if (new_node == nullptr) return false;
1937
1938 DCHECK_GE(new_node_size, size_in_bytes);
1939
1940 // The old-space-step might have finished sweeping and restarted marking.
1941 // Verify that it did not turn the page of the new node into an evacuation
1942 // candidate.
1943 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1944
1945 // Memory in the linear allocation area is counted as allocated. We may free
1946 // a little of this again immediately - see below.
1947 Page* page = Page::FromAddress(new_node->address());
1948 IncreaseAllocatedBytes(new_node_size, page);
1949
1950 Address start = new_node->address();
1951 Address end = new_node->address() + new_node_size;
1952 Address limit = ComputeLimit(start, end, size_in_bytes);
1953 DCHECK_LE(limit, end);
1954 DCHECK_LE(size_in_bytes, limit - start);
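// Only [start, limit) becomes the new linear allocation area; anything
// between limit and the end of the node is given back to the free list
// right away.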
1955 if (limit != end) {
1956 if (identity() == CODE_SPACE) {
1957 heap()->UnprotectAndRegisterMemoryChunk(page);
1958 }
1959 Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
1960 }
1961 SetLinearAllocationArea(start, limit);
1962
1963 return true;
1964 }
1965
1966 #ifdef DEBUG
1967 void PagedSpace::Print() {}
1968 #endif
1969
1970 #ifdef VERIFY_HEAP
1971 void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
1972 bool allocation_pointer_found_in_space =
1973 (allocation_info_.top() == allocation_info_.limit());
1974 size_t external_space_bytes[kNumTypes];
1975 size_t external_page_bytes[kNumTypes];
1976
1977 for (int i = 0; i < kNumTypes; i++) {
1978 external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
1979 }
1980
1981 for (Page* page : *this) {
1982 CHECK(page->owner() == this);
1983
1984 for (int i = 0; i < kNumTypes; i++) {
1985 external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
1986 }
1987
1988 if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1989 allocation_pointer_found_in_space = true;
1990 }
1991 CHECK(page->SweepingDone());
1992 HeapObjectIterator it(page);
1993 Address end_of_previous_object = page->area_start();
1994 Address top = page->area_end();
1995
1996 for (HeapObject* object = it.Next(); object != nullptr;
1997 object = it.Next()) {
1998 CHECK(end_of_previous_object <= object->address());
1999
2000 // The first word should be a map, and we expect all map pointers to
2001 // be in map space or read-only space.
2002 Map* map = object->map();
2003 CHECK(map->IsMap());
2004 CHECK(heap()->map_space()->Contains(map) ||
2005 heap()->read_only_space()->Contains(map));
2006
2007 // Perform space-specific object verification.
2008 VerifyObject(object);
2009
2010 // The object itself should look OK.
2011 object->ObjectVerify(isolate);
2012
2013 if (!FLAG_verify_heap_skip_remembered_set) {
2014 heap()->VerifyRememberedSetFor(object);
2015 }
2016
2017 // All the interior pointers should be contained in the heap.
2018 int size = object->Size();
2019 object->IterateBody(map, size, visitor);
2020 CHECK(object->address() + size <= top);
2021 end_of_previous_object = object->address() + size;
2022
2023 if (object->IsExternalString()) {
2024 ExternalString* external_string = ExternalString::cast(object);
2025 size_t size = external_string->ExternalPayloadSize();
2026 external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
2027 } else if (object->IsJSArrayBuffer()) {
2028 JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
2029 if (ArrayBufferTracker::IsTracked(array_buffer)) {
2030 size_t size = NumberToSize(array_buffer->byte_length());
2031 external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
2032 }
2033 }
2034 }
2035 for (int i = 0; i < kNumTypes; i++) {
2036 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2037 CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
2038 external_space_bytes[t] += external_page_bytes[t];
2039 }
2040 }
2041 for (int i = 0; i < kNumTypes; i++) {
2042 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2043 CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
2044 }
2045 CHECK(allocation_pointer_found_in_space);
2046 #ifdef DEBUG
2047 VerifyCountersAfterSweeping();
2048 #endif
2049 }
2050
2051 void PagedSpace::VerifyLiveBytes() {
2052 IncrementalMarking::MarkingState* marking_state =
2053 heap()->incremental_marking()->marking_state();
2054 for (Page* page : *this) {
2055 CHECK(page->SweepingDone());
2056 HeapObjectIterator it(page);
2057 int black_size = 0;
2058 for (HeapObject* object = it.Next(); object != nullptr;
2059 object = it.Next()) {
2060 // All the interior pointers should be contained in the heap.
2061 if (marking_state->IsBlack(object)) {
2062 black_size += object->Size();
2063 }
2064 }
2065 CHECK_LE(black_size, marking_state->live_bytes(page));
2066 }
2067 }
2068 #endif // VERIFY_HEAP
2069
2070 #ifdef DEBUG
2071 void PagedSpace::VerifyCountersAfterSweeping() {
2072 size_t total_capacity = 0;
2073 size_t total_allocated = 0;
2074 for (Page* page : *this) {
2075 DCHECK(page->SweepingDone());
2076 total_capacity += page->area_size();
2077 HeapObjectIterator it(page);
2078 size_t real_allocated = 0;
2079 for (HeapObject* object = it.Next(); object != nullptr;
2080 object = it.Next()) {
2081 if (!object->IsFiller()) {
2082 real_allocated += object->Size();
2083 }
2084 }
2085 total_allocated += page->allocated_bytes();
2086 // The real size can be smaller than the accounted size if array trimming
2087 // or object slack tracking happened after sweeping.
2088 DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
2089 DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
2090 }
2091 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
2092 DCHECK_EQ(total_allocated, accounting_stats_.Size());
2093 }
2094
2095 void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
2096 // We need to refine the counters on pages that have already been swept but
2097 // not yet moved over to the actual space. Otherwise, the AccountingStats
2098 // are just an over-approximation.
2099 RefillFreeList();
2100
2101 size_t total_capacity = 0;
2102 size_t total_allocated = 0;
2103 auto marking_state =
2104 heap()->incremental_marking()->non_atomic_marking_state();
2105 for (Page* page : *this) {
2106 size_t page_allocated =
2107 page->SweepingDone()
2108 ? page->allocated_bytes()
2109 : static_cast<size_t>(marking_state->live_bytes(page));
2110 total_capacity += page->area_size();
2111 total_allocated += page_allocated;
2112 DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
2113 }
2114 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
2115 DCHECK_EQ(total_allocated, accounting_stats_.Size());
2116 }
2117 #endif
2118
2119 // -----------------------------------------------------------------------------
2120 // NewSpace implementation
2121
2122 NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
2123 size_t max_semispace_capacity)
2124 : SpaceWithLinearArea(heap, NEW_SPACE),
2125 to_space_(heap, kToSpace),
2126 from_space_(heap, kFromSpace),
2127 reservation_() {
2128 DCHECK(initial_semispace_capacity <= max_semispace_capacity);
2129 DCHECK(
2130 base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
2131
2132 to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
2133 from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
2134 if (!to_space_.Commit()) {
2135 V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
2136 }
2137 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
2138 ResetLinearAllocationArea();
2139 }
2140
2141 void NewSpace::TearDown() {
2142 allocation_info_.Reset(kNullAddress, kNullAddress);
2143
2144 to_space_.TearDown();
2145 from_space_.TearDown();
2146 }
2147
2148 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
2149
2150
2151 void NewSpace::Grow() {
2152 // Double the semispace size but only up to maximum capacity.
2153 DCHECK(TotalCapacity() < MaximumCapacity());
2154 size_t new_capacity =
2155 Min(MaximumCapacity(),
2156 static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
2157 if (to_space_.GrowTo(new_capacity)) {
2158 // Only grow from-space if we managed to grow to-space.
2159 if (!from_space_.GrowTo(new_capacity)) {
2160 // If we managed to grow to-space but couldn't grow from-space,
2161 // attempt to shrink to-space.
2162 if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
2163 // We are in an inconsistent state because we could not
2164 // commit/uncommit memory from new space.
2165 FATAL("inconsistent state");
2166 }
2167 }
2168 }
2169 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2170 }
2171
2172
2173 void NewSpace::Shrink() {
2174 size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
2175 size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
2176 if (rounded_new_capacity < TotalCapacity() &&
2177 to_space_.ShrinkTo(rounded_new_capacity)) {
2178 // Only shrink from-space if we managed to shrink to-space.
2179 from_space_.Reset();
2180 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
2181 // If we managed to shrink to-space but couldn't shrink from-space,
2182 // attempt to grow to-space again.
2183 if (!to_space_.GrowTo(from_space_.current_capacity())) {
2184 // We are in an inconsistent state because we could not
2185 // commit/uncommit memory from new space.
2186 FATAL("inconsistent state");
2187 }
2188 }
2189 }
2190 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2191 }
2192
2193 bool NewSpace::Rebalance() {
2194 // Order here is important to make use of the page pool.
2195 return to_space_.EnsureCurrentCapacity() &&
2196 from_space_.EnsureCurrentCapacity();
2197 }
2198
2199 bool SemiSpace::EnsureCurrentCapacity() {
2200 if (is_committed()) {
2201 const int expected_pages =
2202 static_cast<int>(current_capacity_ / Page::kPageSize);
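// The semispace should hold exactly expected_pages pages: surplus pages are
// released below and missing ones are taken from the pooled allocator.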
2203 MemoryChunk* current_page = first_page();
2204 int actual_pages = 0;
2205
2206 // First iterate through the page list up to expected_pages, as long as
2207 // that many pages exist.
2208 while (current_page != nullptr && actual_pages < expected_pages) {
2209 actual_pages++;
2210 current_page = current_page->list_node().next();
2211 }
2212
2213 // Free all overallocated pages which are behind current_page.
2214 while (current_page) {
2215 MemoryChunk* next_current = current_page->list_node().next();
2216 memory_chunk_list_.Remove(current_page);
2217 // Clear new space flags to avoid this page being treated as a new
2218 // space page that is potentially being swept.
2219 current_page->SetFlags(0, Page::kIsInNewSpaceMask);
2220 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2221 current_page);
2222 current_page = next_current;
2223 }
2224
2225 // Add more pages if we have less than expected_pages.
2226 IncrementalMarking::NonAtomicMarkingState* marking_state =
2227 heap()->incremental_marking()->non_atomic_marking_state();
2228 while (actual_pages < expected_pages) {
2229 actual_pages++;
2230 current_page =
2231 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2232 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2233 if (current_page == nullptr) return false;
2234 DCHECK_NOT_NULL(current_page);
2235 memory_chunk_list_.PushBack(current_page);
2236 marking_state->ClearLiveness(current_page);
2237 current_page->SetFlags(first_page()->GetFlags(),
2238 static_cast<uintptr_t>(Page::kCopyAllFlags));
2239 heap()->CreateFillerObjectAt(current_page->area_start(),
2240 static_cast<int>(current_page->area_size()),
2241 ClearRecordedSlots::kNo);
2242 }
2243 }
2244 return true;
2245 }
2246
2247 LinearAllocationArea LocalAllocationBuffer::Close() {
2248 if (IsValid()) {
2249 heap_->CreateFillerObjectAt(
2250 allocation_info_.top(),
2251 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2252 ClearRecordedSlots::kNo);
2253 const LinearAllocationArea old_info = allocation_info_;
2254 allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
2255 return old_info;
2256 }
2257 return LinearAllocationArea(kNullAddress, kNullAddress);
2258 }
2259
2260 LocalAllocationBuffer::LocalAllocationBuffer(
2261 Heap* heap, LinearAllocationArea allocation_info)
2262 : heap_(heap), allocation_info_(allocation_info) {
2263 if (IsValid()) {
2264 heap_->CreateFillerObjectAt(
2265 allocation_info_.top(),
2266 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2267 ClearRecordedSlots::kNo);
2268 }
2269 }
2270
2271
2272 LocalAllocationBuffer::LocalAllocationBuffer(
2273 const LocalAllocationBuffer& other) {
2274 *this = other;
2275 }
2276
2277
2278 LocalAllocationBuffer& LocalAllocationBuffer::operator=(
2279 const LocalAllocationBuffer& other) {
2280 Close();
2281 heap_ = other.heap_;
2282 allocation_info_ = other.allocation_info_;
2283
2284 // This is needed since we (a) cannot yet use move semantics, (b) want to
2285 // make it easy to use the class by value, and (c) implicitly call {Close}
2286 // upon copy.
2287 const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
2288 kNullAddress, kNullAddress);
2289 return *this;
2290 }
2291
2292 void NewSpace::UpdateLinearAllocationArea() {
2293 // Make sure there are no unaccounted allocations.
2294 DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
2295
2296 Address new_top = to_space_.page_low();
2297 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2298 allocation_info_.Reset(new_top, to_space_.page_high());
2299 original_top_ = top();
2300 original_limit_ = limit();
2301 StartNextInlineAllocationStep();
2302 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2303 }
2304
2305 void NewSpace::ResetLinearAllocationArea() {
2306 // Do a step to account for memory allocated so far before resetting.
2307 InlineAllocationStep(top(), top(), kNullAddress, 0);
2308 to_space_.Reset();
2309 UpdateLinearAllocationArea();
2310 // Clear all mark-bits in the to-space.
2311 IncrementalMarking::NonAtomicMarkingState* marking_state =
2312 heap()->incremental_marking()->non_atomic_marking_state();
2313 for (Page* p : to_space_) {
2314 marking_state->ClearLiveness(p);
2315 // Concurrent marking may have local live bytes for this page.
2316 heap()->concurrent_marking()->ClearLiveness(p);
2317 }
2318 }
2319
2320 void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
2321 Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
2322 allocation_info_.set_limit(new_limit);
2323 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2324 }
2325
2326 void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
2327 Address new_limit = ComputeLimit(top(), limit(), min_size);
2328 DCHECK_LE(new_limit, limit());
2329 DecreaseLimit(new_limit);
2330 }
2331
2332 bool NewSpace::AddFreshPage() {
2333 Address top = allocation_info_.top();
2334 DCHECK(!Page::IsAtObjectStart(top));
2335
2336 // Do a step to account for memory allocated on the previous page.
2337 InlineAllocationStep(top, top, kNullAddress, 0);
2338
2339 if (!to_space_.AdvancePage()) {
2340 // No more pages left to advance.
2341 return false;
2342 }
2343
2344 // Clear the remainder of the current page.
2345 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
2346 int remaining_in_page = static_cast<int>(limit - top);
2347 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
2348 UpdateLinearAllocationArea();
2349
2350 return true;
2351 }
2352
2353
2354 bool NewSpace::AddFreshPageSynchronized() {
2355 base::LockGuard<base::Mutex> guard(&mutex_);
2356 return AddFreshPage();
2357 }
2358
2359
2360 bool NewSpace::EnsureAllocation(int size_in_bytes,
2361 AllocationAlignment alignment) {
2362 Address old_top = allocation_info_.top();
2363 Address high = to_space_.page_high();
2364 int filler_size = Heap::GetFillToAlign(old_top, alignment);
2365 int aligned_size_in_bytes = size_in_bytes + filler_size;
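// The request has to fit together with any alignment filler; e.g. a
// double-aligned allocation at a misaligned top may need an extra
// kPointerSize filler word in front of the object.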
2366
2367 if (old_top + aligned_size_in_bytes > high) {
2368 // Not enough room in the page, try to allocate a new one.
2369 if (!AddFreshPage()) {
2370 return false;
2371 }
2372
2373 old_top = allocation_info_.top();
2374 high = to_space_.page_high();
2375 filler_size = Heap::GetFillToAlign(old_top, alignment);
2376 }
2377
2378 DCHECK(old_top + aligned_size_in_bytes <= high);
2379
2380 if (allocation_info_.limit() < high) {
2381 // The limit has been lowered either because linear allocation was
2382 // disabled, because incremental marking wants a chance to do a step, or
2383 // because the idle scavenge job wants a chance to post a task.
2384 // Set the new limit accordingly.
2385 Address new_top = old_top + aligned_size_in_bytes;
2386 Address soon_object = old_top + filler_size;
2387 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
2388 UpdateInlineAllocationLimit(aligned_size_in_bytes);
2389 }
2390 return true;
2391 }
2392
2393 size_t LargeObjectSpace::Available() {
2394 return ObjectSizeFor(heap()->memory_allocator()->Available());
2395 }
2396
2397 void SpaceWithLinearArea::StartNextInlineAllocationStep() {
2398 if (heap()->allocation_step_in_progress()) {
2399 // If we are mid-way through an existing step, don't start a new one.
2400 return;
2401 }
2402
2403 if (AllocationObserversActive()) {
2404 top_on_previous_step_ = top();
2405 UpdateInlineAllocationLimit(0);
2406 } else {
2407 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2408 }
2409 }
2410
2411 void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
2412 InlineAllocationStep(top(), top(), kNullAddress, 0);
2413 Space::AddAllocationObserver(observer);
2414 DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2415 }
2416
2417 void SpaceWithLinearArea::RemoveAllocationObserver(
2418 AllocationObserver* observer) {
2419 Address top_for_next_step =
2420 allocation_observers_.size() == 1 ? kNullAddress : top();
2421 InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
2422 Space::RemoveAllocationObserver(observer);
2423 DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2424 }
2425
2426 void SpaceWithLinearArea::PauseAllocationObservers() {
2427 // Do a step to account for memory allocated so far.
2428 InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
2429 Space::PauseAllocationObservers();
2430 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2431 UpdateInlineAllocationLimit(0);
2432 }
2433
2434 void SpaceWithLinearArea::ResumeAllocationObservers() {
2435 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2436 Space::ResumeAllocationObservers();
2437 StartNextInlineAllocationStep();
2438 }
2439
2440 void SpaceWithLinearArea::InlineAllocationStep(Address top,
2441 Address top_for_next_step,
2442 Address soon_object,
2443 size_t size) {
2444 if (heap()->allocation_step_in_progress()) {
2445 // Avoid starting a new step if we are mid-way through an existing one.
2446 return;
2447 }
2448
2449 if (top_on_previous_step_) {
2450 if (top < top_on_previous_step_) {
2451 // Generated code decreased the top pointer to do folded allocations.
2452 DCHECK_NE(top, kNullAddress);
2453 DCHECK_EQ(Page::FromAllocationAreaAddress(top),
2454 Page::FromAllocationAreaAddress(top_on_previous_step_));
2455 top_on_previous_step_ = top;
2456 }
2457 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
2458 AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
2459 top_on_previous_step_ = top_for_next_step;
2460 }
2461 }
2462
2463 std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
2464 return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
2465 }
2466
2467 #ifdef VERIFY_HEAP
2468 // We do not use the SemiSpaceIterator because verification doesn't assume
2469 // that it works (it depends on the invariants we are checking).
2470 void NewSpace::Verify(Isolate* isolate) {
2471 // The allocation pointer should be in the space or at the very end.
2472 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2473
2474 // There should be objects packed in from the low address up to the
2475 // allocation pointer.
2476 Address current = to_space_.first_page()->area_start();
2477 CHECK_EQ(current, to_space_.space_start());
2478
2479 size_t external_space_bytes[kNumTypes];
2480 for (int i = 0; i < kNumTypes; i++) {
2481 external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
2482 }
2483
2484 while (current != top()) {
2485 if (!Page::IsAlignedToPageSize(current)) {
2486 // The allocation pointer should not be in the middle of an object.
2487 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
2488 current < top());
2489
2490 HeapObject* object = HeapObject::FromAddress(current);
2491
2492 // The first word should be a map, and we expect all map pointers to
2493 // be in map space or read-only space.
2494 Map* map = object->map();
2495 CHECK(map->IsMap());
2496 CHECK(heap()->map_space()->Contains(map) ||
2497 heap()->read_only_space()->Contains(map));
2498
2499 // The object should not be code or a map.
2500 CHECK(!object->IsMap());
2501 CHECK(!object->IsAbstractCode());
2502
2503 // The object itself should look OK.
2504 object->ObjectVerify(isolate);
2505
2506 // All the interior pointers should be contained in the heap.
2507 VerifyPointersVisitor visitor(heap());
2508 int size = object->Size();
2509 object->IterateBody(map, size, &visitor);
2510
2511 if (object->IsExternalString()) {
2512 ExternalString* external_string = ExternalString::cast(object);
2513 size_t size = external_string->ExternalPayloadSize();
2514 external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
2515 } else if (object->IsJSArrayBuffer()) {
2516 JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
2517 if (ArrayBufferTracker::IsTracked(array_buffer)) {
2518 size_t size = NumberToSize(array_buffer->byte_length());
2519 external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
2520 }
2521 }
2522
2523 current += size;
2524 } else {
2525 // At end of page, switch to next page.
2526 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
2527 current = page->area_start();
2528 }
2529 }
2530
2531 for (int i = 0; i < kNumTypes; i++) {
2532 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2533 CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
2534 }
2535
2536 // Check semi-spaces.
2537 CHECK_EQ(from_space_.id(), kFromSpace);
2538 CHECK_EQ(to_space_.id(), kToSpace);
2539 from_space_.Verify();
2540 to_space_.Verify();
2541 }
2542 #endif
2543
2544 // -----------------------------------------------------------------------------
2545 // SemiSpace implementation
2546
2547 void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
2548 DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
2549 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
2550 current_capacity_ = minimum_capacity_;
2551 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
2552 committed_ = false;
2553 }
2554
2555
2556 void SemiSpace::TearDown() {
2557 // Properly uncommit memory to keep the allocator counters in sync.
2558 if (is_committed()) {
2559 Uncommit();
2560 }
2561 current_capacity_ = maximum_capacity_ = 0;
2562 }
2563
2564
2565 bool SemiSpace::Commit() {
2566 DCHECK(!is_committed());
2567 const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
2568 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
2569 Page* new_page =
2570 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2571 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2572 if (new_page == nullptr) {
2573 if (pages_added) RewindPages(pages_added);
2574 return false;
2575 }
2576 memory_chunk_list_.PushBack(new_page);
2577 }
2578 Reset();
2579 AccountCommitted(current_capacity_);
2580 if (age_mark_ == kNullAddress) {
2581 age_mark_ = first_page()->area_start();
2582 }
2583 committed_ = true;
2584 return true;
2585 }
2586
2587
2588 bool SemiSpace::Uncommit() {
2589 DCHECK(is_committed());
2590 while (!memory_chunk_list_.Empty()) {
2591 MemoryChunk* chunk = memory_chunk_list_.front();
2592 memory_chunk_list_.Remove(chunk);
2593 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
2594 }
2595 current_page_ = nullptr;
2596 AccountUncommitted(current_capacity_);
2597 committed_ = false;
2598 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2599 return true;
2600 }
2601
2602
2603 size_t SemiSpace::CommittedPhysicalMemory() {
2604 if (!is_committed()) return 0;
2605 size_t size = 0;
2606 for (Page* p : *this) {
2607 size += p->CommittedPhysicalMemory();
2608 }
2609 return size;
2610 }
2611
2612 bool SemiSpace::GrowTo(size_t new_capacity) {
2613 if (!is_committed()) {
2614 if (!Commit()) return false;
2615 }
2616 DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
2617 DCHECK_LE(new_capacity, maximum_capacity_);
2618 DCHECK_GT(new_capacity, current_capacity_);
2619 const size_t delta = new_capacity - current_capacity_;
2620 DCHECK(IsAligned(delta, AllocatePageSize()));
2621 const int delta_pages = static_cast<int>(delta / Page::kPageSize);
2622 DCHECK(last_page());
2623 IncrementalMarking::NonAtomicMarkingState* marking_state =
2624 heap()->incremental_marking()->non_atomic_marking_state();
2625 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
2626 Page* new_page =
2627 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2628 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2629 if (new_page == nullptr) {
2630 if (pages_added) RewindPages(pages_added);
2631 return false;
2632 }
2633 memory_chunk_list_.PushBack(new_page);
2634 marking_state->ClearLiveness(new_page);
2635 // Duplicate the flags that were set on the old page.
2636 new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
2637 }
2638 AccountCommitted(delta);
2639 current_capacity_ = new_capacity;
2640 return true;
2641 }
2642
2643 void SemiSpace::RewindPages(int num_pages) {
2644 DCHECK_GT(num_pages, 0);
2645 DCHECK(last_page());
2646 while (num_pages > 0) {
2647 MemoryChunk* last = last_page();
2648 memory_chunk_list_.Remove(last);
2649 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
2650 num_pages--;
2651 }
2652 }
2653
2654 bool SemiSpace::ShrinkTo(size_t new_capacity) {
2655 DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
2656 DCHECK_GE(new_capacity, minimum_capacity_);
2657 DCHECK_LT(new_capacity, current_capacity_);
2658 if (is_committed()) {
2659 const size_t delta = current_capacity_ - new_capacity;
2660 DCHECK(IsAligned(delta, Page::kPageSize));
2661 int delta_pages = static_cast<int>(delta / Page::kPageSize);
2662 RewindPages(delta_pages);
2663 AccountUncommitted(delta);
2664 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2665 }
2666 current_capacity_ = new_capacity;
2667 return true;
2668 }
2669
2670 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
2671 for (Page* page : *this) {
2672 page->set_owner(this);
2673 page->SetFlags(flags, mask);
2674 if (id_ == kToSpace) {
2675 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
2676 page->SetFlag(MemoryChunk::IN_TO_SPACE);
2677 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2678 heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
2679 page, 0);
2680 } else {
2681 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
2682 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
2683 }
2684 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
2685 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
2686 }
2687 }
2688
2689
2690 void SemiSpace::Reset() {
2691 DCHECK(first_page());
2692 DCHECK(last_page());
2693 current_page_ = first_page();
2694 pages_used_ = 0;
2695 }
2696
2697 void SemiSpace::RemovePage(Page* page) {
2698 if (current_page_ == page) {
2699 if (page->prev_page()) {
2700 current_page_ = page->prev_page();
2701 }
2702 }
2703 memory_chunk_list_.Remove(page);
2704 for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
2705 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2706 DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
2707 }
2708 }
2709
2710 void SemiSpace::PrependPage(Page* page) {
2711 page->SetFlags(current_page()->GetFlags(),
2712 static_cast<uintptr_t>(Page::kCopyAllFlags));
2713 page->set_owner(this);
2714 memory_chunk_list_.PushFront(page);
2715 pages_used_++;
2716 for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
2717 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2718 IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
2719 }
2720 }
2721
2722 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
2723 // We won't be swapping semispaces without data in them.
2724 DCHECK(from->first_page());
2725 DCHECK(to->first_page());
2726
2727 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
2728
2729 // We swap all properties but id_.
2730 std::swap(from->current_capacity_, to->current_capacity_);
2731 std::swap(from->maximum_capacity_, to->maximum_capacity_);
2732 std::swap(from->minimum_capacity_, to->minimum_capacity_);
2733 std::swap(from->age_mark_, to->age_mark_);
2734 std::swap(from->committed_, to->committed_);
2735 std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
2736 std::swap(from->current_page_, to->current_page_);
2737 std::swap(from->external_backing_store_bytes_,
2738 to->external_backing_store_bytes_);
2739
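// After swapping the page lists, the IN_TO_SPACE / IN_FROM_SPACE flags on
// every page still describe the pre-flip roles; rewrite them now.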
2740 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
2741 from->FixPagesFlags(0, 0);
2742 }
2743
2744 void SemiSpace::set_age_mark(Address mark) {
2745 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
2746 age_mark_ = mark;
2747 // Mark all pages up to the one containing mark.
2748 for (Page* p : PageRange(space_start(), mark)) {
2749 p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2750 }
2751 }
2752
2753 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
2754 // Use NewSpace::GetObjectIterator() to iterate the to-space instead.
2755 UNREACHABLE();
2756 }
2757
2758 #ifdef DEBUG
2759 void SemiSpace::Print() {}
2760 #endif
2761
2762 #ifdef VERIFY_HEAP
2763 void SemiSpace::Verify() {
2764 bool is_from_space = (id_ == kFromSpace);
2765 size_t external_backing_store_bytes[kNumTypes];
2766
2767 for (int i = 0; i < kNumTypes; i++) {
2768 external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
2769 }
2770
2771 for (Page* page : *this) {
2772 CHECK_EQ(page->owner(), this);
2773 CHECK(page->InNewSpace());
2774 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2775 : MemoryChunk::IN_TO_SPACE));
2776 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2777 : MemoryChunk::IN_FROM_SPACE));
2778 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2779 if (!is_from_space) {
2780 // The pointers-from-here-are-interesting flag isn't updated dynamically
2781 // on from-space pages, so it might be out of sync with the marking state.
2782 if (page->heap()->incremental_marking()->IsMarking()) {
2783 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2784 } else {
2785 CHECK(
2786 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2787 }
2788 }
2789 for (int i = 0; i < kNumTypes; i++) {
2790 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2791 external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
2792 }
2793
2794 CHECK_IMPLIES(page->list_node().prev(),
2795 page->list_node().prev()->list_node().next() == page);
2796 }
2797 for (int i = 0; i < kNumTypes; i++) {
2798 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2799 CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
2800 }
2801 }
2802 #endif
2803
2804 #ifdef DEBUG
2805 void SemiSpace::AssertValidRange(Address start, Address end) {
2806 // Addresses must belong to the same semi-space.
2807 Page* page = Page::FromAllocationAreaAddress(start);
2808 Page* end_page = Page::FromAllocationAreaAddress(end);
2809 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
2810 DCHECK_EQ(space, end_page->owner());
2811 // The start address must come before the end address, either on the same
2812 // page or with the end address on a later page in the linked list of
2813 // semi-space pages.
2814 if (page == end_page) {
2815 DCHECK_LE(start, end);
2816 } else {
2817 while (page != end_page) {
2818 page = page->next_page();
2819 }
2820 DCHECK(page);
2821 }
2822 }
2823 #endif
2824
2825
2826 // -----------------------------------------------------------------------------
2827 // SemiSpaceIterator implementation.
2828
2829 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2830 Initialize(space->first_allocatable_address(), space->top());
2831 }
2832
2833
2834 void SemiSpaceIterator::Initialize(Address start, Address end) {
2835 SemiSpace::AssertValidRange(start, end);
2836 current_ = start;
2837 limit_ = end;
2838 }
2839
2840 size_t NewSpace::CommittedPhysicalMemory() {
2841 if (!base::OS::HasLazyCommits()) return CommittedMemory();
2842 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2843 size_t size = to_space_.CommittedPhysicalMemory();
2844 if (from_space_.is_committed()) {
2845 size += from_space_.CommittedPhysicalMemory();
2846 }
2847 return size;
2848 }
2849
2850
2851 // -----------------------------------------------------------------------------
2852 // Free lists for old object spaces implementation
2853
2854
2855 void FreeListCategory::Reset() {
2856 set_top(nullptr);
2857 set_prev(nullptr);
2858 set_next(nullptr);
2859 available_ = 0;
2860 }
2861
2862 FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
2863 size_t* node_size) {
2864 DCHECK(page()->CanAllocate());
2865 FreeSpace* node = top();
2866 if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
2867 *node_size = 0;
2868 return nullptr;
2869 }
2870 set_top(node->next());
2871 *node_size = node->Size();
2872 available_ -= *node_size;
2873 return node;
2874 }
2875
2876 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2877 size_t* node_size) {
2878 DCHECK(page()->CanAllocate());
2879 FreeSpace* prev_non_evac_node = nullptr;
2880 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2881 cur_node = cur_node->next()) {
2882 size_t size = cur_node->size();
2883 if (size >= minimum_size) {
2884 DCHECK_GE(available_, size);
2885 available_ -= size;
2886 if (cur_node == top()) {
2887 set_top(cur_node->next());
2888 }
2889 if (prev_non_evac_node != nullptr) {
2890 MemoryChunk* chunk =
2891 MemoryChunk::FromAddress(prev_non_evac_node->address());
2892 if (chunk->owner()->identity() == CODE_SPACE) {
2893 chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
2894 }
2895 prev_non_evac_node->set_next(cur_node->next());
2896 }
2897 *node_size = size;
2898 return cur_node;
2899 }
2900
2901 prev_non_evac_node = cur_node;
2902 }
2903 return nullptr;
2904 }
2905
2906 void FreeListCategory::Free(Address start, size_t size_in_bytes,
2907 FreeMode mode) {
2908 DCHECK(page()->CanAllocate());
2909 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2910 free_space->set_next(top());
2911 set_top(free_space);
2912 available_ += size_in_bytes;
2913 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2914 owner()->AddCategory(this);
2915 }
2916 }
2917
2918
2919 void FreeListCategory::RepairFreeList(Heap* heap) {
2920 FreeSpace* n = top();
2921 while (n != nullptr) {
2922 Map** map_location = reinterpret_cast<Map**>(n->address());
2923 if (*map_location == nullptr) {
2924 *map_location = ReadOnlyRoots(heap).free_space_map();
2925 } else {
2926 DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
2927 }
2928 n = n->next();
2929 }
2930 }
2931
2932 void FreeListCategory::Relink() {
2933 DCHECK(!is_linked());
2934 owner()->AddCategory(this);
2935 }
2936
2937 FreeList::FreeList() : wasted_bytes_(0) {
2938 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2939 categories_[i] = nullptr;
2940 }
2941 Reset();
2942 }
2943
2944
2945 void FreeList::Reset() {
2946 ForAllFreeListCategories(
2947 [](FreeListCategory* category) { category->Reset(); });
2948 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2949 categories_[i] = nullptr;
2950 }
2951 ResetStats();
2952 }
2953
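// Returns the number of bytes that could not be turned into a free-list
// entry (blocks smaller than kMinBlockSize); those bytes are accounted as
// wasted memory on the page.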
2954 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2955 Page* page = Page::FromAddress(start);
2956 page->DecreaseAllocatedBytes(size_in_bytes);
2957
2958 // Blocks have to be a minimum size to hold free list items.
2959 if (size_in_bytes < kMinBlockSize) {
2960 page->add_wasted_memory(size_in_bytes);
2961 wasted_bytes_ += size_in_bytes;
2962 return size_in_bytes;
2963 }
2964
2965 // Insert other blocks at the head of the free list for the appropriate
2966 // size category.
2967 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2968 page->free_list_category(type)->Free(start, size_in_bytes, mode);
2969 DCHECK_EQ(page->AvailableInFreeList(),
2970 page->AvailableInFreeListFromAllocatedBytes());
2971 return 0;
2972 }
2973
2974 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
2975 size_t* node_size) {
2976 FreeListCategoryIterator it(this, type);
2977 FreeSpace* node = nullptr;
2978 while (it.HasNext()) {
2979 FreeListCategory* current = it.Next();
2980 node = current->PickNodeFromList(minimum_size, node_size);
2981 if (node != nullptr) {
2982 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2983 return node;
2984 }
2985 RemoveCategory(current);
2986 }
2987 return node;
2988 }
2989
2990 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
2991 size_t minimum_size, size_t* node_size) {
2992 if (categories_[type] == nullptr) return nullptr;
2993 FreeSpace* node =
2994 categories_[type]->PickNodeFromList(minimum_size, node_size);
2995 if (node != nullptr) {
2996 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2997 }
2998 return node;
2999 }
3000
3001 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
3002 size_t* node_size,
3003 size_t minimum_size) {
3004 FreeListCategoryIterator it(this, type);
3005 FreeSpace* node = nullptr;
3006 while (it.HasNext()) {
3007 FreeListCategory* current = it.Next();
3008 node = current->SearchForNodeInList(minimum_size, node_size);
3009 if (node != nullptr) {
3010 DCHECK(IsVeryLong() || Available() == SumFreeLists());
3011 return node;
3012 }
3013 if (current->is_empty()) {
3014 RemoveCategory(current);
3015 }
3016 }
3017 return node;
3018 }
3019
3020 FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
3021 DCHECK_GE(kMaxBlockSize, size_in_bytes);
3022 FreeSpace* node = nullptr;
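// Overall search order: check the heads of the categories at or above the
// fast-path type, then scan the kHuge list linearly, and finally retry a
// best-fit lookup in the exact category selected for size_in_bytes.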
3023 // First try the allocation fast path: try to allocate the minimum element
3024 // size of a free list category. This operation is constant time.
3025 FreeListCategoryType type =
3026 SelectFastAllocationFreeListCategoryType(size_in_bytes);
3027 for (int i = type; i < kHuge && node == nullptr; i++) {
3028 node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
3029 node_size);
3030 }
3031
3032 if (node == nullptr) {
3033 // Next search the huge list for free list nodes. This takes linear time in
3034 // the number of huge elements.
3035 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
3036 }
3037
3038 if (node == nullptr && type != kHuge) {
3039 // We didn't find anything in the huge list. Now search the best fitting
3040 // free list for a node that has at least the requested size.
3041 type = SelectFreeListCategoryType(size_in_bytes);
3042 node = TryFindNodeIn(type, size_in_bytes, node_size);
3043 }
3044
3045 if (node != nullptr) {
3046 Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
3047 }
3048
3049 DCHECK(IsVeryLong() || Available() == SumFreeLists());
3050 return node;
3051 }
3052
3053 size_t FreeList::EvictFreeListItems(Page* page) {
3054 size_t sum = 0;
3055 page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
3056 DCHECK_EQ(this, category->owner());
3057 sum += category->available();
3058 RemoveCategory(category);
3059 category->Reset();
3060 });
3061 return sum;
3062 }
3063
3064 bool FreeList::ContainsPageFreeListItems(Page* page) {
3065 bool contained = false;
3066 page->ForAllFreeListCategories(
3067 [this, &contained](FreeListCategory* category) {
3068 if (category->owner() == this && category->is_linked()) {
3069 contained = true;
3070 }
3071 });
3072 return contained;
3073 }
3074
3075 void FreeList::RepairLists(Heap* heap) {
3076 ForAllFreeListCategories(
3077 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
3078 }
3079
3080 bool FreeList::AddCategory(FreeListCategory* category) {
3081 FreeListCategoryType type = category->type_;
3082 DCHECK_LT(type, kNumberOfCategories);
3083 FreeListCategory* top = categories_[type];
3084
3085 if (category->is_empty()) return false;
3086 if (top == category) return false;
3087
3088 // Common doubly-linked list insertion.
3089 if (top != nullptr) {
3090 top->set_prev(category);
3091 }
3092 category->set_next(top);
3093 categories_[type] = category;
3094 return true;
3095 }
3096
3097 void FreeList::RemoveCategory(FreeListCategory* category) {
3098 FreeListCategoryType type = category->type_;
3099 DCHECK_LT(type, kNumberOfCategories);
3100 FreeListCategory* top = categories_[type];
3101
3102   // Common doubly-linked list removal.
3103 if (top == category) {
3104 categories_[type] = category->next();
3105 }
3106 if (category->prev() != nullptr) {
3107 category->prev()->set_next(category->next());
3108 }
3109 if (category->next() != nullptr) {
3110 category->next()->set_prev(category->prev());
3111 }
3112 category->set_next(nullptr);
3113 category->set_prev(nullptr);
3114 }
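
// The categories_[type] heads above form intrusive doubly-linked lists of
// FreeListCategory objects owned by their pages. A hypothetical sketch of
// handing a category from one free list to another (not code from this file;
// |src| and |dst| are assumed FreeList instances and |category| currently
// belongs to |src|):
//
//   src->RemoveCategory(category);
//   dst->AddCategory(category);  // Returns false if |category| is empty.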
3115
3116 void FreeList::PrintCategories(FreeListCategoryType type) {
3117 FreeListCategoryIterator it(this, type);
3118 PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
3119 static_cast<void*>(categories_[type]), type);
3120 while (it.HasNext()) {
3121 FreeListCategory* current = it.Next();
3122 PrintF("%p -> ", static_cast<void*>(current));
3123 }
3124 PrintF("null\n");
3125 }
3126
3127
3128 #ifdef DEBUG
3129 size_t FreeListCategory::SumFreeList() {
3130 size_t sum = 0;
3131 FreeSpace* cur = top();
3132 while (cur != nullptr) {
3133 DCHECK(cur->map() == page()->heap()->root(Heap::kFreeSpaceMapRootIndex));
3134 sum += cur->relaxed_read_size();
3135 cur = cur->next();
3136 }
3137 return sum;
3138 }
3139
3140 int FreeListCategory::FreeListLength() {
3141 int length = 0;
3142 FreeSpace* cur = top();
3143 while (cur != nullptr) {
3144 length++;
3145 cur = cur->next();
3146 if (length == kVeryLongFreeList) return length;
3147 }
3148 return length;
3149 }
3150
3151 bool FreeList::IsVeryLong() {
3152 int len = 0;
3153 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
3154 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
3155 while (it.HasNext()) {
3156 len += it.Next()->FreeListLength();
3157 if (len >= FreeListCategory::kVeryLongFreeList) return true;
3158 }
3159 }
3160 return false;
3161 }
3162
3163
3164 // This can take a very long time because it is linear in the number of entries
3165 // on the free list, so it should not be called if FreeListLength returns
3166 // kVeryLongFreeList.
3167 size_t FreeList::SumFreeLists() {
3168 size_t sum = 0;
3169 ForAllFreeListCategories(
3170 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
3171 return sum;
3172 }
3173 #endif
3174
3175
3176 // -----------------------------------------------------------------------------
3177 // OldSpace implementation
3178
3179 void PagedSpace::PrepareForMarkCompact() {
3180 // We don't have a linear allocation area while sweeping. It will be restored
3181 // on the first allocation after the sweep.
3182 FreeLinearAllocationArea();
3183
3184 // Clear the free list before a full GC---it will be rebuilt afterward.
3185 free_list_.Reset();
3186 }
3187
3188 size_t PagedSpace::SizeOfObjects() {
3189 CHECK_GE(limit(), top());
3190 DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
3191 return Size() - (limit() - top());
3192 }
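
// Worked example: if Size() reports 512 KB and the current linear allocation
// area still has 4 KB between top() and limit(), SizeOfObjects() returns
// 508 KB, i.e. only memory actually handed out to objects is counted.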
3193
3194 // After we have booted, we have created a map which represents free space
3195 // on the heap. If there was already a free list then the elements on it
3196 // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
3197 // fix them.
3198 void PagedSpace::RepairFreeListsAfterDeserialization() {
3199 free_list_.RepairLists(heap());
3200 // Each page may have a small free space that is not tracked by a free list.
3201 // Those free spaces still contain null as their map pointer.
3202 // Overwrite them with new fillers.
3203 for (Page* page : *this) {
3204 int size = static_cast<int>(page->wasted_memory());
3205 if (size == 0) {
3206 // If there is no wasted memory then all free space is in the free list.
3207 continue;
3208 }
3209 Address start = page->HighWaterMark();
3210 Address end = page->area_end();
3211 if (start < end - size) {
3212       // A region at the high watermark is already in the free list.
3213 HeapObject* filler = HeapObject::FromAddress(start);
3214 CHECK(filler->IsFiller());
3215 start += filler->Size();
3216 }
3217 CHECK_EQ(size, static_cast<int>(end - start));
3218 heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
3219 }
3220 }
3221
3222 bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
3223 MarkCompactCollector* collector = heap()->mark_compact_collector();
3224 if (collector->sweeping_in_progress()) {
3225 // Wait for the sweeper threads here and complete the sweeping phase.
3226 collector->EnsureSweepingCompleted();
3227
3228 // After waiting for the sweeper threads, there may be new free-list
3229 // entries.
3230 return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3231 }
3232 return false;
3233 }
3234
3235 bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
3236 MarkCompactCollector* collector = heap()->mark_compact_collector();
3237 if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
3238 collector->sweeper()->ParallelSweepSpace(identity(), 0);
3239 RefillFreeList();
3240 return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3241 }
3242 return false;
3243 }
3244
3245 bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3246 VMState<GC> state(heap()->isolate());
3247 RuntimeCallTimerScope runtime_timer(
3248 heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
3249 return RawSlowRefillLinearAllocationArea(size_in_bytes);
3250 }
3251
3252 bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3253 return RawSlowRefillLinearAllocationArea(size_in_bytes);
3254 }
3255
3256 bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
3257 // Allocation in this space has failed.
3258 DCHECK_GE(size_in_bytes, 0);
3259 const int kMaxPagesToSweep = 1;
3260
3261 if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
3262
3263 MarkCompactCollector* collector = heap()->mark_compact_collector();
3264 // Sweeping is still in progress.
3265 if (collector->sweeping_in_progress()) {
3266 if (FLAG_concurrent_sweeping && !is_local() &&
3267 !collector->sweeper()->AreSweeperTasksRunning()) {
3268 collector->EnsureSweepingCompleted();
3269 }
3270
3271     // First try to refill the free-list; concurrent sweeper threads
3272 // may have freed some objects in the meantime.
3273 RefillFreeList();
3274
3275 // Retry the free list allocation.
3276 if (RefillLinearAllocationAreaFromFreeList(
3277 static_cast<size_t>(size_in_bytes)))
3278 return true;
3279
3280     // If sweeping is still in progress, try to sweep pages.
3281 int max_freed = collector->sweeper()->ParallelSweepSpace(
3282 identity(), size_in_bytes, kMaxPagesToSweep);
3283 RefillFreeList();
3284 if (max_freed >= size_in_bytes) {
3285 if (RefillLinearAllocationAreaFromFreeList(
3286 static_cast<size_t>(size_in_bytes)))
3287 return true;
3288 }
3289 } else if (is_local()) {
3290     // Sweeping is not in progress and we are on a {CompactionSpace}. This can
3291 // only happen when we are evacuating for the young generation.
3292 PagedSpace* main_space = heap()->paged_space(identity());
3293 Page* page = main_space->RemovePageSafe(size_in_bytes);
3294 if (page != nullptr) {
3295 AddPage(page);
3296 if (RefillLinearAllocationAreaFromFreeList(
3297 static_cast<size_t>(size_in_bytes)))
3298 return true;
3299 }
3300 }
3301
3302 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
3303 DCHECK((CountTotalPages() > 1) ||
3304 (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
3305 return RefillLinearAllocationAreaFromFreeList(
3306 static_cast<size_t>(size_in_bytes));
3307 }
3308
3309 // If sweeper threads are active, wait for them at that point and steal
3310   // elements from their free-lists. Allocation may still fail here, which
3311 // would indicate that there is not enough memory for the given allocation.
3312 return SweepAndRetryAllocation(size_in_bytes);
3313 }
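
// In summary, the slow path above tries, in order: (1) the free list as-is,
// (2) refilling the free list from the concurrent sweeper and retrying,
// (3) parallel-sweeping up to kMaxPagesToSweep pages of this space and
// retrying, (4) for local compaction spaces with no sweeping in progress,
// stealing a page from the corresponding main space, (5) expanding the space
// by a fresh page, and finally (6) completing sweeping entirely via
// SweepAndRetryAllocation().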
3314
3315 // -----------------------------------------------------------------------------
3316 // MapSpace implementation
3317
3318 #ifdef VERIFY_HEAP
3319 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
3320 #endif
3321
3322 ReadOnlySpace::ReadOnlySpace(Heap* heap)
3323 : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
3324 is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
3325 }
3326
3327 void ReadOnlyPage::MakeHeaderRelocatable() {
3328 if (mutex_ != nullptr) {
3329 // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
3330 delete mutex_;
3331 mutex_ = nullptr;
3332 local_tracker_ = nullptr;
3333 reservation_.Reset();
3334 }
3335 }
3336
3337 void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
3338 const size_t page_size = MemoryAllocator::GetCommitPageSize();
3339 const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
3340 for (Page* p : *this) {
3341 ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
3342 if (access == PageAllocator::kRead) {
3343 page->MakeHeaderRelocatable();
3344 }
3345 CHECK(SetPermissions(page->address() + area_start_offset,
3346 page->size() - area_start_offset, access));
3347 }
3348 }
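
// Note that permissions are only changed from |area_start_offset| onward: the
// page header in the first RoundUp(Page::kObjectStartOffset, page_size) bytes
// keeps its previous protection. When switching to kRead, the header is first
// made relocatable so the read-only page no longer owns a mutex or local
// tracker.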
3349
3350 void ReadOnlySpace::ClearStringPaddingIfNeeded() {
3351 if (is_string_padding_cleared_) return;
3352
3353 WritableScope writable_scope(this);
3354 for (Page* page : *this) {
3355 HeapObjectIterator iterator(page);
3356 for (HeapObject* o = iterator.Next(); o != nullptr; o = iterator.Next()) {
3357 if (o->IsSeqOneByteString()) {
3358 SeqOneByteString::cast(o)->clear_padding();
3359 } else if (o->IsSeqTwoByteString()) {
3360 SeqTwoByteString::cast(o)->clear_padding();
3361 }
3362 }
3363 }
3364 is_string_padding_cleared_ = true;
3365 }
3366
3367 void ReadOnlySpace::MarkAsReadOnly() {
3368 DCHECK(!is_marked_read_only_);
3369 FreeLinearAllocationArea();
3370 is_marked_read_only_ = true;
3371 SetPermissionsForPages(PageAllocator::kRead);
3372 }
3373
3374 void ReadOnlySpace::MarkAsReadWrite() {
3375 DCHECK(is_marked_read_only_);
3376 SetPermissionsForPages(PageAllocator::kReadWrite);
3377 is_marked_read_only_ = false;
3378 }
3379
3380 Address LargePage::GetAddressToShrink(Address object_address,
3381 size_t object_size) {
3382 if (executable() == EXECUTABLE) {
3383 return 0;
3384 }
3385 size_t used_size = ::RoundUp((object_address - address()) + object_size,
3386 MemoryAllocator::GetCommitPageSize());
3387 if (used_size < CommittedPhysicalMemory()) {
3388 return address() + used_size;
3389 }
3390 return 0;
3391 }
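
// Worked example (assuming a 4 KB commit page size): if the object ends
// 130 KB past the page start, used_size rounds up to 132 KB. When more than
// 132 KB of the page is committed, everything from address() + 132 KB onward
// can be released; otherwise, and for executable pages, 0 is returned and the
// page is not shrunk.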
3392
3393 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
3394 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
3395 SlotSet::FREE_EMPTY_BUCKETS);
3396 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
3397 SlotSet::FREE_EMPTY_BUCKETS);
3398 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
3399 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
3400 }
3401
3402 // -----------------------------------------------------------------------------
3403 // LargeObjectIterator
3404
3405 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
3406 current_ = space->first_page();
3407 }
3408
3409
3410 HeapObject* LargeObjectIterator::Next() {
3411 if (current_ == nullptr) return nullptr;
3412
3413 HeapObject* object = current_->GetObject();
3414 current_ = current_->next_page();
3415 return object;
3416 }
3417
3418
3419 // -----------------------------------------------------------------------------
3420 // LargeObjectSpace
3421
3422 LargeObjectSpace::LargeObjectSpace(Heap* heap)
3423 : LargeObjectSpace(heap, LO_SPACE) {}
3424
3425 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
3426 : Space(heap, id),
3427 size_(0),
3428 page_count_(0),
3429 objects_size_(0),
3430 chunk_map_(1024) {}
3431
3432 void LargeObjectSpace::TearDown() {
3433 while (!memory_chunk_list_.Empty()) {
3434 LargePage* page = first_page();
3435 LOG(heap()->isolate(),
3436 DeleteEvent("LargeObjectChunk",
3437 reinterpret_cast<void*>(page->address())));
3438 memory_chunk_list_.Remove(page);
3439 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
3440 }
3441 }
3442
3443 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
3444 Executability executable) {
3445 // Check if we want to force a GC before growing the old space further.
3446 // If so, fail the allocation.
3447 if (!heap()->CanExpandOldGeneration(object_size) ||
3448 !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
3449 return AllocationResult::Retry(identity());
3450 }
3451
3452 LargePage* page = AllocateLargePage(object_size, executable);
3453 if (page == nullptr) return AllocationResult::Retry(identity());
3454 page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
3455 HeapObject* object = page->GetObject();
3456 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
3457 heap()->GCFlagsForIncrementalMarking(),
3458 kGCCallbackScheduleIdleGarbageCollection);
3459 if (heap()->incremental_marking()->black_allocation()) {
3460 heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
3461 }
3462 DCHECK_IMPLIES(
3463 heap()->incremental_marking()->black_allocation(),
3464 heap()->incremental_marking()->marking_state()->IsBlack(object));
3465 page->InitializationMemoryFence();
3466 return object;
3467 }
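
// A hypothetical caller sketch (not code from this file; |lo_space| stands
// for heap()->lo_space()):
//
//   AllocationResult result =
//       lo_space->AllocateRaw(object_size, NOT_EXECUTABLE);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Retry was returned: a GC has to free up memory before trying again.
//   }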
3468
3469 LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
3470 Executability executable) {
3471 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
3472 object_size, this, executable);
3473 if (page == nullptr) return nullptr;
3474 DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
3475
3476 size_ += static_cast<int>(page->size());
3477 AccountCommitted(page->size());
3478 objects_size_ += object_size;
3479 page_count_++;
3480 memory_chunk_list_.PushBack(page);
3481
3482 InsertChunkMapEntries(page);
3483
3484 HeapObject* object = page->GetObject();
3485
3486 if (Heap::ShouldZapGarbage()) {
3487 // Make the object consistent so the heap can be verified in OldSpaceStep.
3488 // We only need to do this in debug builds or if verify_heap is on.
3489 reinterpret_cast<Object**>(object->address())[0] =
3490 ReadOnlyRoots(heap()).fixed_array_map();
3491 reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
3492 }
3493 heap()->CreateFillerObjectAt(object->address(), object_size,
3494 ClearRecordedSlots::kNo);
3495 AllocationStep(object_size, object->address(), object_size);
3496 return page;
3497 }
3498
3499
3500 size_t LargeObjectSpace::CommittedPhysicalMemory() {
3501 // On a platform that provides lazy committing of memory, we over-account
3502 // the actually committed memory. There is no easy way right now to support
3503 // precise accounting of committed memory in large object space.
3504 return CommittedMemory();
3505 }
3506
3507
3508 // GC support
3509 Object* LargeObjectSpace::FindObject(Address a) {
3510 LargePage* page = FindPage(a);
3511 if (page != nullptr) {
3512 return page->GetObject();
3513 }
3514 return Smi::kZero; // Signaling not found.
3515 }
3516
3517 LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
3518 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3519 return FindPage(a);
3520 }
3521
3522 LargePage* LargeObjectSpace::FindPage(Address a) {
3523 const Address key = MemoryChunk::FromAddress(a)->address();
3524 auto it = chunk_map_.find(key);
3525 if (it != chunk_map_.end()) {
3526 LargePage* page = it->second;
3527 if (page->Contains(a)) {
3528 return page;
3529 }
3530 }
3531 return nullptr;
3532 }
3533
3534
3535 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3536 IncrementalMarking::NonAtomicMarkingState* marking_state =
3537 heap()->incremental_marking()->non_atomic_marking_state();
3538 LargeObjectIterator it(this);
3539 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3540 if (marking_state->IsBlackOrGrey(obj)) {
3541 Marking::MarkWhite(marking_state->MarkBitFrom(obj));
3542 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
3543 RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
3544 chunk->ResetProgressBar();
3545 marking_state->SetLiveBytes(chunk, 0);
3546 }
3547 DCHECK(marking_state->IsWhite(obj));
3548 }
3549 }
3550
3551 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3552   // There may be concurrent access to the chunk map. We have to take the lock
3553 // here.
3554 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3555 for (Address current = reinterpret_cast<Address>(page);
3556 current < reinterpret_cast<Address>(page) + page->size();
3557 current += MemoryChunk::kPageSize) {
3558 chunk_map_[current] = page;
3559 }
3560 }
3561
3562 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
3563 RemoveChunkMapEntries(page, page->address());
3564 }
3565
3566 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3567 Address free_start) {
3568 for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
3569 current < reinterpret_cast<Address>(page) + page->size();
3570 current += MemoryChunk::kPageSize) {
3571 chunk_map_.erase(current);
3572 }
3573 }
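
// The chunk map above maps every MemoryChunk::kPageSize-aligned base address
// covered by a large page to that page, so FindPage() can resolve an
// arbitrary interior address via MemoryChunk::FromAddress(a)->address(). For
// example, a page of size 3 * MemoryChunk::kPageSize starting at |base| gets
// entries for base, base + kPageSize and base + 2 * kPageSize; when the page
// is shrunk, RemoveChunkMapEntries() erases the aligned entries at or above
// |free_start|.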
3574
3575 void LargeObjectSpace::FreeUnmarkedObjects() {
3576 LargePage* current = first_page();
3577 IncrementalMarking::NonAtomicMarkingState* marking_state =
3578 heap()->incremental_marking()->non_atomic_marking_state();
3579 objects_size_ = 0;
3580 while (current) {
3581 LargePage* next_current = current->next_page();
3582 HeapObject* object = current->GetObject();
3583 DCHECK(!marking_state->IsGrey(object));
3584 if (marking_state->IsBlack(object)) {
3585 Address free_start;
3586 size_t size = static_cast<size_t>(object->Size());
3587 objects_size_ += size;
3588 if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
3589 0) {
3590 DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
3591 current->ClearOutOfLiveRangeSlots(free_start);
3592 RemoveChunkMapEntries(current, free_start);
3593 const size_t bytes_to_free =
3594 current->size() - (free_start - current->address());
3595 heap()->memory_allocator()->PartialFreeMemory(
3596 current, free_start, bytes_to_free,
3597 current->area_start() + object->Size());
3598 size_ -= bytes_to_free;
3599 AccountUncommitted(bytes_to_free);
3600 }
3601 } else {
3602 memory_chunk_list_.Remove(current);
3603
3604 // Free the chunk.
3605 size_ -= static_cast<int>(current->size());
3606 AccountUncommitted(current->size());
3607 page_count_--;
3608
3609 RemoveChunkMapEntries(current);
3610 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
3611 current);
3612 }
3613 current = next_current;
3614 }
3615 }
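
// FreeUnmarkedObjects() thus handles two cases per page: a black (live) large
// object keeps its page but the unused committed tail beyond the object is
// released, together with its recorded slots and chunk-map entries; an
// unmarked object causes the whole page to be unlinked, its accounting
// reverted, and the memory handed to the allocator for pre-freeing.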
3616
3617
3618 bool LargeObjectSpace::Contains(HeapObject* object) {
3619 Address address = object->address();
3620 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3621
3622 bool owned = (chunk->owner() == this);
3623
3624 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3625
3626 return owned;
3627 }
3628
3629 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
3630 return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
3631 }
3632
3633 #ifdef VERIFY_HEAP
3634 // We do not assume that the large object iterator works, because it depends
3635 // on the invariants we are checking during verification.
3636 void LargeObjectSpace::Verify(Isolate* isolate) {
3637 size_t external_backing_store_bytes[kNumTypes];
3638
3639 for (int i = 0; i < kNumTypes; i++) {
3640 external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
3641 }
3642
3643 for (LargePage* chunk = first_page(); chunk != nullptr;
3644 chunk = chunk->next_page()) {
3645 // Each chunk contains an object that starts at the large object page's
3646 // object area start.
3647 HeapObject* object = chunk->GetObject();
3648 Page* page = Page::FromAddress(object->address());
3649 CHECK(object->address() == page->area_start());
3650
3651 // The first word should be a map, and we expect all map pointers to be
3652 // in map space or read-only space.
3653 Map* map = object->map();
3654 CHECK(map->IsMap());
3655 CHECK(heap()->map_space()->Contains(map) ||
3656 heap()->read_only_space()->Contains(map));
3657
3658 // We have only the following types in the large object space:
3659 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3660 object->IsExternalString() || object->IsThinString() ||
3661 object->IsFixedArray() || object->IsFixedDoubleArray() ||
3662 object->IsWeakFixedArray() || object->IsWeakArrayList() ||
3663 object->IsPropertyArray() || object->IsByteArray() ||
3664 object->IsFeedbackVector() || object->IsBigInt() ||
3665 object->IsFreeSpace() || object->IsFeedbackMetadata());
3666
3667 // The object itself should look OK.
3668 object->ObjectVerify(isolate);
3669
3670 if (!FLAG_verify_heap_skip_remembered_set) {
3671 heap()->VerifyRememberedSetFor(object);
3672 }
3673
3674 // Byte arrays and strings don't have interior pointers.
3675 if (object->IsAbstractCode()) {
3676 VerifyPointersVisitor code_visitor(heap());
3677 object->IterateBody(map, object->Size(), &code_visitor);
3678 } else if (object->IsFixedArray()) {
3679 FixedArray* array = FixedArray::cast(object);
3680 for (int j = 0; j < array->length(); j++) {
3681 Object* element = array->get(j);
3682 if (element->IsHeapObject()) {
3683 HeapObject* element_object = HeapObject::cast(element);
3684 CHECK(heap()->Contains(element_object));
3685 CHECK(element_object->map()->IsMap());
3686 }
3687 }
3688 } else if (object->IsPropertyArray()) {
3689 PropertyArray* array = PropertyArray::cast(object);
3690 for (int j = 0; j < array->length(); j++) {
3691 Object* property = array->get(j);
3692 if (property->IsHeapObject()) {
3693 HeapObject* property_object = HeapObject::cast(property);
3694 CHECK(heap()->Contains(property_object));
3695 CHECK(property_object->map()->IsMap());
3696 }
3697 }
3698 }
3699 for (int i = 0; i < kNumTypes; i++) {
3700 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
3701 external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
3702 }
3703 }
3704 for (int i = 0; i < kNumTypes; i++) {
3705 ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
3706 CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
3707 }
3708 }
3709 #endif
3710
3711 #ifdef DEBUG
3712 void LargeObjectSpace::Print() {
3713 StdoutStream os;
3714 LargeObjectIterator it(this);
3715 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3716 obj->Print(os);
3717 }
3718 }
3719
3720 void Page::Print() {
3721 // Make a best-effort to print the objects in the page.
3722 PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
3723 this->owner()->name());
3724 printf(" --------------------------------------\n");
3725 HeapObjectIterator objects(this);
3726 unsigned mark_size = 0;
3727 for (HeapObject* object = objects.Next(); object != nullptr;
3728 object = objects.Next()) {
3729 bool is_marked =
3730 heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
3731 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3732 if (is_marked) {
3733 mark_size += object->Size();
3734 }
3735 object->ShortPrint();
3736 PrintF("\n");
3737 }
3738 printf(" --------------------------------------\n");
3739 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
3740 heap()->incremental_marking()->marking_state()->live_bytes(this));
3741 }
3742
3743 #endif // DEBUG
3744
3745 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
3746 : LargeObjectSpace(heap, NEW_LO_SPACE) {}
3747
3748 AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
3749 // TODO(hpayer): Add heap growing strategy here.
3750 LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
3751 if (page == nullptr) return AllocationResult::Retry(identity());
3752 page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
3753 page->SetFlag(MemoryChunk::IN_TO_SPACE);
3754 page->InitializationMemoryFence();
3755 return page->GetObject();
3756 }
3757
3758 size_t NewLargeObjectSpace::Available() {
3759 // TODO(hpayer): Update as soon as we have a growing strategy.
3760 return 0;
3761 }
3762 } // namespace internal
3763 } // namespace v8
3764