// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/paged-spaces.h"

#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/logging/counters.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator

PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
                                                   PagedSpace* space)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(space->first_page(), nullptr),
      current_page_(page_range_.begin()) {
  space_->MakeLinearAllocationAreaIterable();
  heap->mark_compact_collector()->EnsureSweepingCompleted();
}

PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
                                                   PagedSpace* space,
                                                   Page* page)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(page),
      current_page_(page_range_.begin()) {
  space_->MakeLinearAllocationAreaIterable();
  heap->mark_compact_collector()->EnsureSweepingCompleted();
#ifdef DEBUG
  AllocationSpace owner = page->owner_identity();
  DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
#endif  // DEBUG
}

// We have hit the end of the page and should advance to the next block of
// objects.  This happens at the end of the page.
bool PagedSpaceObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);

  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
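
// Prepares a fresh memory chunk for use as a page of this space: resets
// allocation statistics, sets old-generation page flags, and initializes the
// free-list categories before the page is linked into the space.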
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
  Page* page = static_cast<Page*>(chunk);
  DCHECK_EQ(
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
      page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->ResetAllocationStatistics();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
  page->InitializationMemoryFence();
  return page;
}

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable, FreeList* free_list,
                       LocalSpaceKind local_space_kind)
    : SpaceWithLinearArea(heap, space, free_list),
      executable_(executable),
      local_space_kind_(local_space_kind) {
  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
  accounting_stats_.Clear();
}

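// Returns all pages of this space to the memory allocator and resets the
// accounting statistics.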
void PagedSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
  }
  accounting_stats_.Clear();
}

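// Re-links pages that the concurrent sweeper has finished sweeping back into
// this space's free list (or, for compaction spaces, moves them over from the
// owning space).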
void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  DCHECK_IMPLIES(is_local_space(), is_compaction_space());
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;

  {
    Page* p = nullptr;
    while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
      // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
      // entries here to make them unavailable for allocations.
      if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
        p->ForAllFreeListCategories([this](FreeListCategory* category) {
          category->Reset(free_list());
        });
      }

      // Also merge old-to-new remembered sets if not scavenging, to avoid data
      // races: one thread might iterate the remembered set while another
      // thread merges it.
      if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
        p->MergeOldToNewRememberedSets();
      }

      // Pages can only change ownership during compaction. This is safe
      // because there exists no other competing action on the page links
      // during compaction.
      if (is_compaction_space()) {
        DCHECK_NE(this, p->owner());
        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
        base::MutexGuard guard(owner->mutex());
        owner->RefineAllocatedBytesAfterSweeping(p);
        owner->RemovePage(p);
        added += AddPage(p);
        added += p->wasted_memory();
      } else {
        base::MutexGuard guard(mutex());
        DCHECK_EQ(this, p->owner());
        RefineAllocatedBytesAfterSweeping(p);
        added += RelinkFreeListCategories(p);
        added += p->wasted_memory();
      }
      if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
    }
  }
}

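// Merges the pages and allocation statistics of {other} into this space and
// leaves {other} empty.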
void PagedSpace::MergeLocalSpace(LocalSpace* other) {
  base::MutexGuard guard(mutex());

  DCHECK(identity() == other->identity());

  // Unmerged fields:
  //   area_size_
  other->FreeLinearAllocationArea();

  for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
       i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
    allocations_origins_[i] += other->allocations_origins_[i];
  }

  // The linear allocation area of {other} should be destroyed now.
  DCHECK_EQ(kNullAddress, other->top());
  DCHECK_EQ(kNullAddress, other->limit());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);

    p->MergeOldToNewRememberedSets();

    // Ensure that pages are initialized before objects on them are discovered
    // by concurrent markers.
    p->InitializationMemoryFence();

    // Relinking requires the category to be unlinked.
    other->RemovePage(p);
    AddPage(p);
    DCHECK_IMPLIES(
        !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
        p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());

    // TODO(leszeks): Here we should do an allocation step, but:
    //   1. Allocation groups are currently not handled properly by the sampling
    //      allocation profiler, and
    //   2. Observers might try to take the space lock, which isn't reentrant.
    // We'll have to come up with a better solution for allocation stepping
    // before shipping, which will likely be using LocalHeap.
  }
  for (auto p : other->GetNewPages()) {
    heap()->NotifyOldGenerationExpansion(identity(), p);
  }

  DCHECK_EQ(0u, other->Size());
  DCHECK_EQ(0u, other->Capacity());
}

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  base::MutexGuard guard(mutex());
  size_t size = 0;
  for (Page* page : *this) {
    size += page->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) const {
  Page* p = Page::FromAddress(addr);
  for (const Page* page : *this) {
    if (page == p) return true;
  }
  return false;
}

void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // The live_bytes count on the page was accounted in the space's allocated
  // bytes counter. After sweeping, allocated_bytes() contains the accurate
  // live byte count on the page.
  size_t old_counter = marking_state->live_bytes(page);
  size_t new_counter = page->allocated_bytes();
  DCHECK_GE(old_counter, new_counter);
  if (old_counter > new_counter) {
    DecreaseAllocatedBytes(old_counter - new_counter, page);
  }
  marking_state->SetLiveBytes(page, 0);
}

Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
  base::MutexGuard guard(mutex());
  Page* page = free_list()->GetPageForSize(size_in_bytes);
  if (!page) return nullptr;
  RemovePage(page);
  return page;
}

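// Links {page} into this space and updates committed-memory, capacity,
// allocated-bytes, and external backing store accounting. Returns the number
// of bytes made available by relinking the page's free-list categories.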
size_t PagedSpace::AddPage(Page* page) {
  CHECK(page->SweepingDone());
  page->set_owner(this);
  memory_chunk_list_.PushBack(page);
  AccountCommitted(page->size());
  IncreaseCapacity(page->area_size());
  IncreaseAllocatedBytes(page->allocated_bytes(), page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
  return RelinkFreeListCategories(page);
}

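// Unlinks {page} from this space and reverses the accounting performed by
// AddPage(). The page itself is not freed.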
void PagedSpace::RemovePage(Page* page) {
  CHECK(page->SweepingDone());
  memory_chunk_list_.Remove(page);
  UnlinkFreeListCategories(page);
  DecreaseAllocatedBytes(page->allocated_bytes(), page);
  DecreaseCapacity(page->area_size());
  AccountUncommitted(page->size());
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

void PagedSpace::SetTopAndLimit(Address top, Address limit) {
  DCHECK(top == limit ||
         Page::FromAddress(top) == Page::FromAddress(limit - 1));
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(top, limit);
}

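// Shrinks {page} to its high water mark, updates the capacity and committed
// memory accounting for the memory given up, and returns the number of bytes
// released.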
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
  size_t unused = page->ShrinkToHighWaterMark();
  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
  AccountUncommitted(unused);
  return unused;
}

void PagedSpace::ResetFreeList() {
  for (Page* page : *this) {
    free_list_->EvictFreeListItems(page);
  }
  DCHECK(free_list_->IsEmpty());
}

void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  FreeLinearAllocationArea();
  ResetFreeList();
  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    ShrinkPageToHighWaterMark(page);
  }
}

Page* PagedSpace::AllocatePage() {
  return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
                                                  executable());
}

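// Grows the space by one page on the main thread and adds the page's entire
// area to the free list. Returns nullptr if the page could not be allocated.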
Page* PagedSpace::Expand() {
  Page* page = AllocatePage();
  if (page == nullptr) return nullptr;
  ConcurrentAllocationMutex guard(this);
  AddPage(page);
  Free(page->area_start(), page->area_size(),
       SpaceAccountingMode::kSpaceAccounted);
  return page;
}

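// Variant of Expand() used for background (LocalHeap) allocation; it takes the
// space mutex directly while adding the new page.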
Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
  Page* page = AllocatePage();
  if (page == nullptr) return nullptr;
  base::MutexGuard lock(&space_mutex_);
  AddPage(page);
  Free(page->area_start(), page->area_size(),
       SpaceAccountingMode::kSpaceAccounted);
  return page;
}

int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* page : *this) {
    count++;
    USE(page);
  }
  return count;
}

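// Installs a new linear allocation area [top, limit) and, if black allocation
// is active, marks the area black so newly allocated objects survive the
// current incremental marking cycle.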
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
  SetTopAndLimit(top, limit);
  if (top != kNullAddress && top != limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
  }
}

void PagedSpace::DecreaseLimit(Address new_limit) {
  Address old_limit = limit();
  DCHECK_LE(top(), new_limit);
  DCHECK_GE(old_limit, new_limit);
  if (new_limit != old_limit) {
    base::Optional<CodePageMemoryModificationScope> optional_scope;

    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(new_limit);
      optional_scope.emplace(chunk);
    }

    SetTopAndLimit(top(), new_limit);
    Free(new_limit, old_limit - new_limit,
         SpaceAccountingMode::kSpaceAccounted);
    if (heap()->incremental_marking()->black_allocation()) {
      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
                                                                   old_limit);
    }
  }
}

void PagedSpace::MarkLinearAllocationAreaBlack() {
  DCHECK(heap()->incremental_marking()->black_allocation());
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}

void PagedSpace::UnmarkLinearAllocationArea() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}

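// Fills the unused part of the linear allocation area with a filler object so
// that heap iterators can walk over it safely.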
void PagedSpace::MakeLinearAllocationAreaIterable() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    base::Optional<CodePageMemoryModificationScope> optional_scope;

    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(current_top);
      optional_scope.emplace(chunk);
    }

    heap_->CreateFillerObjectAt(current_top,
                                static_cast<int>(current_limit - current_top),
                                ClearRecordedSlots::kNo);
  }
}

size_t PagedSpace::Available() {
  ConcurrentAllocationMutex guard(this);
  return free_list_->Available();
}

void PagedSpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  Address current_top = top();
  Address current_limit = limit();
  if (current_top == kNullAddress) {
    DCHECK_EQ(kNullAddress, current_limit);
    return;
  }

  AdvanceAllocationObservers();

  if (current_top != current_limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }

  SetTopAndLimit(kNullAddress, kNullAddress);
  DCHECK_GE(current_limit, current_top);

  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  if (identity() == CODE_SPACE) {
    heap()->UnprotectAndRegisterMemoryChunk(
        MemoryChunk::FromAddress(current_top));
  }

  DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
                 heap()->incremental_marking()->marking_state()->IsWhite(
                     HeapObject::FromAddress(current_top)));
  Free(current_top, current_limit - current_top,
       SpaceAccountingMode::kSpaceAccounted);
}

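// Detaches {page} from this space's accounting and hands it to the memory
// allocator for (queued) release. The page must contain no live objects.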
void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(page->owner(), this);

  free_list_->EvictFreeListItems(page);

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    SetTopAndLimit(kNullAddress, kNullAddress);
  }

  if (identity() == CODE_SPACE) {
    heap()->isolate()->RemoveCodeMemoryChunk(page);
  }

  AccountUncommitted(page->size());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}

void PagedSpace::SetReadable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadable();
  }
}

void PagedSpace::SetReadAndExecutable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndExecutable();
  }
}

void PagedSpace::SetReadAndWritable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndWritable();
  }
}

std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new PagedSpaceObjectIterator(heap, this));
}

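// Tries to refill the linear allocation area from the free list with at least
// {size_in_bytes} of memory. Returns false if the free list has no suitable
// node.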
bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
                                               AllocationOrigin origin) {
  ConcurrentAllocationMutex guard(this);
  DCHECK(IsAligned(size_in_bytes, kTaggedSize));
  DCHECK_LE(top(), limit());
#ifdef DEBUG
  if (top() != limit()) {
    DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
  }
#endif
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  FreeLinearAllocationArea();

  size_t new_node_size = 0;
  FreeSpace new_node =
      free_list_->Allocate(size_in_bytes, &new_node_size, origin);
  if (new_node.is_null()) return false;
  DCHECK_GE(new_node_size, size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromHeapObject(new_node);
  IncreaseAllocatedBytes(new_node_size, page);

  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
  Address start = new_node.address();
  Address end = new_node.address() + new_node_size;
  Address limit = ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    if (identity() == CODE_SPACE) {
      heap()->UnprotectAndRegisterMemoryChunk(page);
    }
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }
  SetLinearAllocationArea(start, limit);

  return true;
}

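// Slow path for background-thread LAB refills: retries the free list, helps
// with concurrent sweeping, expands the space, and finally drains sweeping for
// this space before giving up.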
base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
    LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes,
    AllocationAlignment alignment, AllocationOrigin origin) {
  DCHECK(!is_local_space() && identity() == OLD_SPACE);
  DCHECK_EQ(origin, AllocationOrigin::kRuntime);

  auto result = TryAllocationFromFreeListBackground(
      local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
  if (result) return result;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    auto result = TryAllocationFromFreeListBackground(
        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
    if (result) return result;

    // Now contribute to sweeping from background thread and then try to
    // reallocate.
    Sweeper::FreeSpaceMayContainInvalidatedSlots
        invalidated_slots_in_free_space =
            Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;

    const int kMaxPagesToSweep = 1;
    int max_freed = collector->sweeper()->ParallelSweepSpace(
        identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
        invalidated_slots_in_free_space);

    RefillFreeList();

    if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
      auto result = TryAllocationFromFreeListBackground(
          local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
      if (result) return result;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
      heap()->CanExpandOldGenerationBackground(AreaSize()) &&
      ExpandBackground(local_heap)) {
    DCHECK((CountTotalPages() > 1) ||
           (min_size_in_bytes <= free_list_->Available()));
    auto result = TryAllocationFromFreeListBackground(
        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
    if (result) return result;
  }

  if (collector->sweeping_in_progress()) {
    // Complete sweeping for this space.
    collector->DrainSweepingWorklistForSpace(identity());

    RefillFreeList();

    // Last try to acquire memory from free list.
    return TryAllocationFromFreeListBackground(
        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
  }

  return {};
}

base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
                                                size_t min_size_in_bytes,
                                                size_t max_size_in_bytes,
                                                AllocationAlignment alignment,
                                                AllocationOrigin origin) {
  base::MutexGuard lock(&space_mutex_);
  DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
  DCHECK_EQ(identity(), OLD_SPACE);

  size_t new_node_size = 0;
  FreeSpace new_node =
      free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
  if (new_node.is_null()) return {};
  DCHECK_GE(new_node_size, min_size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromHeapObject(new_node);
  IncreaseAllocatedBytes(new_node_size, page);

  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();

  size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);

  Address start = new_node.address();
  Address end = new_node.address() + new_node_size;
  Address limit = new_node.address() + used_size_in_bytes;
  DCHECK_LE(limit, end);
  DCHECK_LE(min_size_in_bytes, limit - start);
  if (limit != end) {
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }

  return std::make_pair(start, used_size_in_bytes);
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  size_t external_space_bytes[kNumTypes];
  size_t external_page_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK_EQ(page->owner(), this);

    for (int i = 0; i < kNumTypes; i++) {
      external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    }

    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    PagedSpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map map = object.map();
      CHECK(map.IsMap());
      CHECK(ReadOnlyHeap::Contains(map) ||
            isolate->heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      if (identity() != RO_SPACE && !FLAG_verify_heap_skip_remembered_set) {
        isolate->heap()->VerifyRememberedSetFor(object);
      }

      // All the interior pointers should be contained in the heap.
      int size = object.Size();
      object.IterateBody(map, size, visitor);
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;

      if (object.IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string.ExternalPayloadSize();
        external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
      external_space_bytes[t] += external_page_bytes[t];
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    if (i == ExternalBackingStoreType::kArrayBuffer) continue;
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }
  CHECK(allocation_pointer_found_in_space);

  if (identity() == OLD_SPACE) {
    size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
    CHECK_EQ(bytes,
             ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
  }

#ifdef DEBUG
  VerifyCountersAfterSweeping(isolate->heap());
#endif
}

void PagedSpace::VerifyLiveBytes() {
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  for (Page* page : *this) {
    CHECK(page->SweepingDone());
    PagedSpaceObjectIterator it(heap(), this, page);
    int black_size = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      // All the interior pointers should be contained in the heap.
      if (marking_state->IsBlack(object)) {
        black_size += object.Size();
      }
    }
    CHECK_LE(black_size, marking_state->live_bytes(page));
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (Page* page : *this) {
    DCHECK(page->SweepingDone());
    total_capacity += page->area_size();
    PagedSpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      if (!object.IsFreeSpaceOrFiller()) {
        real_allocated += object.Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}

void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
  // We need to refine the counters on pages that are already swept and have
  // not been moved over to the actual space. Otherwise, the AccountingStats
  // are just an over-approximation.
  RefillFreeList();

  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
            ? page->allocated_bytes()
            : static_cast<size_t>(marking_state->live_bytes(page));
    total_capacity += page->area_size();
    total_allocated += page_allocated;
    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif

void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
  // Ensure there are no unaccounted allocations.
  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());

  Address new_limit = ComputeLimit(top(), limit(), min_size);
  DCHECK_LE(top(), new_limit);
  DCHECK_LE(new_limit, limit());
  DecreaseLimit(new_limit);
}

// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be restored
  // on the first allocation after the sweep.
  FreeLinearAllocationArea();

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_->Reset();
}

bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
  VMState<GC> state(heap()->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
  return RawRefillLabMain(size_in_bytes, origin);
}

Page* LocalSpace::Expand() {
  Page* page = PagedSpace::Expand();
  new_pages_.push_back(page);
  return page;
}

bool CompactionSpace::RefillLabMain(int size_in_bytes,
                                    AllocationOrigin origin) {
  return RawRefillLabMain(size_in_bytes, origin);
}

bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
  Page* page = Expand();
  if (!page) return false;
  if (!is_compaction_space()) {
    heap()->NotifyOldGenerationExpansion(identity(), page);
  }
  DCHECK((CountTotalPages() > 1) ||
         (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
  return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                       origin);
}

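// Main-thread slow path for refilling the linear allocation area: retries the
// free list, contributes to sweeping, steals swept pages from the main space
// when compacting, and expands the space if allowed.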
bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
  // Non-compaction local spaces are not supported.
  DCHECK_IMPLIES(is_local_space(), is_compaction_space());

  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                      origin))
      return true;

    if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
                                 origin))
      return true;
  }

  if (is_compaction_space()) {
    // The main thread may have acquired all swept pages. Try to steal from
    // it. This can only happen during young generation evacuation.
    PagedSpace* main_space = heap()->paged_space(identity());
    Page* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      AddPage(page);
      if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                        origin))
        return true;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
      heap()->CanExpandOldGeneration(AreaSize())) {
    if (TryExpand(size_in_bytes, origin)) {
      return true;
    }
  }

  // Try sweeping all pages.
  if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
    return true;
  }

  if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
    // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
    // GC and give it a chance to increase the heap limit.
    return TryExpand(size_in_bytes, origin);
  }
  return false;
}

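// Helps the sweeper on the main thread (sweeping up to {max_pages} pages or
// until roughly {required_freed_bytes} are freed), refills the free list, and
// retries the allocation.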
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
                                          int max_pages, int size_in_bytes,
                                          AllocationOrigin origin) {
  // Cleanup invalidated old-to-new refs for compaction space in the
  // final atomic pause.
  Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
      is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
                            : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
                                             max_pages,
                                             invalidated_slots_in_free_space);
    RefillFreeList();
    return TryAllocationFromFreeListMain(size_in_bytes, origin);
  }
  return false;
}

AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
                                             AllocationAlignment alignment,
                                             AllocationOrigin origin) {
  if (!is_local_space()) {
    // Start incremental marking before the actual allocation; this allows the
    // allocation function to mark the object black when incremental marking is
    // running.
    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        heap()->GCFlagsForIncrementalMarking(),
        kGCCallbackScheduleIdleGarbageCollection);
  }

#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment != kWordAligned
          ? AllocateRawAligned(size_in_bytes, alignment, origin)
          : AllocateRawUnaligned(size_in_bytes, origin);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
  return result;
}

// -----------------------------------------------------------------------------
// MapSpace implementation

// TODO(dmercadier): use a heap instead of sorting like that.
// Using a heap will have multiple benefits:
//   - for now, SortFreeList is only called after sweeping, which is somewhat
//   late. Using a heap, sorting could be done online: FreeListCategories would
//   be inserted in a heap (i.e., in a sorted manner).
//   - SortFreeList is a bit fragile: any change to FreeListMap (or to
//   MapSpace::free_list_) could break it.
void MapSpace::SortFreeList() {
  using LiveBytesPagePair = std::pair<size_t, Page*>;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(CountTotalPages());

  for (Page* p : *this) {
    free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
    pages.push_back(std::make_pair(p->allocated_bytes(), p));
  }

  // Sort by least-allocated-bytes first.
  std::sort(pages.begin(), pages.end(),
            [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
              return a.first < b.first;
            });

  for (LiveBytesPagePair const& p : pages) {
    // Since AddCategory inserts in head position, it reverses the order
    // produced by the sort above: the least-allocated-bytes page is added
    // first and therefore ends up last, while the most-allocated-bytes page
    // ends up first.
    free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
  }
}

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif

}  // namespace internal
}  // namespace v8