// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sweeper.h"

#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {

Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
    : heap_(heap),
      marking_state_(marking_state),
      incremental_sweeper_pending_(false),
      sweeping_in_progress_(false),
      iterability_task_semaphore_(0),
      iterability_in_progress_(false),
      iterability_task_started_(false),
      should_reduce_memory_(false) {}

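// Pauses concurrent sweeping by canceling the sweeper job; sweeper tasks are
// restarted when the scope is destroyed.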
Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
  if (!sweeper_->sweeping_in_progress()) return;

  if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
    sweeper_->job_handle_->Cancel();
}

Sweeper::PauseScope::~PauseScope() {
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}

Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseScope& pause_scope)
    : sweeper_(sweeper),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  // The PauseScope here only serves as a witness that concurrent sweeping has
  // been paused.
  USE(pause_scope);

  if (!sweeping_in_progress_) return;

  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}

Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}

class Sweeper::SweeperJob final : public JobTask {
 public:
  SweeperJob(Isolate* isolate, Sweeper* sweeper)
      : sweeper_(sweeper), tracer_(isolate->heap()->tracer()) {}

  ~SweeperJob() override = default;

  SweeperJob(const SweeperJob&) = delete;
  SweeperJob& operator=(const SweeperJob&) = delete;

  void Run(JobDelegate* delegate) final {
    if (delegate->IsJoiningThread()) {
      TRACE_GC(tracer_, GCTracer::Scope::MC_SWEEP);
      RunImpl(delegate);
    } else {
      TRACE_GC_EPOCH(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING,
                     ThreadKind::kBackground);
      RunImpl(delegate);
    }
  }

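  // Estimates the useful number of workers: the workers already running plus
  // one additional worker per kPagePerTask remaining pages (rounded up),
  // capped at kMaxSweeperTasks.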
  size_t GetMaxConcurrency(size_t worker_count) const override {
    const size_t kPagePerTask = 2;
    return std::min<size_t>(
        kMaxSweeperTasks,
        worker_count +
            (sweeper_->ConcurrentSweepingPageCount() + kPagePerTask - 1) /
                kPagePerTask);
  }

 private:
  void RunImpl(JobDelegate* delegate) {
    const int offset = delegate->GetTaskId();
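    // Rotate the starting space by the task id so that concurrent tasks start
    // sweeping different spaces instead of contending on the same list.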
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      if (!sweeper_->ConcurrentSweepSpace(space_id, delegate)) return;
    }
  }
  Sweeper* const sweeper_;
  GCTracer* const tracer_;
};

class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  ~IncrementalSweeperTask() override = default;

  IncrementalSweeperTask(const IncrementalSweeperTask&) = delete;
  IncrementalSweeperTask& operator=(const IncrementalSweeperTask&) = delete;

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
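      // Sweep one code-space page per task and re-schedule the task until the
      // code-space sweeping list is drained.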
      if (!sweeper_->IncrementalSweepSpace(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
};

void Sweeper::TearDown() {
  if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
}

void Sweeper::StartSweeping() {
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  should_reduce_memory_ = heap_->ShouldReduceMemory();
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    // Sorting is done in order to make compaction more efficient: by sweeping
    // pages with the most free bytes first, we make it more likely that when
    // evacuating a page, already swept pages will have enough free bytes to
    // hold the objects to move (and therefore, we won't need to wait for more
    // pages to be swept in order to move those objects).
    // We sort in descending order of live bytes, i.e., ascending order of free
    // bytes, because GetSweepingPageSafe returns pages in reverse order.
    int space_index = GetSweepSpaceIndex(space);
    std::sort(
        sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
        [marking_state](Page* a, Page* b) {
          return marking_state->live_bytes(a) > marking_state->live_bytes(b);
        });
  });
}

void Sweeper::StartSweeperTasks() {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    job_handle_ = V8::GetCurrentPlatform()->PostJob(
        TaskPriority::kUserVisible,
        std::make_unique<SweeperJob>(heap_->isolate(), this));
    ScheduleIncrementalSweepingTask();
  }
}

Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::MutexGuard guard(&mutex_);
  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces([this](AllocationSpace space) {
    ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
  });

  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}

void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
  if (!sweeping_in_progress_) return;
  ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
}

void Sweeper::SupportConcurrentSweeping() {
  ForAllSweepingSpaces([this](AllocationSpace space) {
    const int kMaxPagesToSweepPerSpace = 1;
    ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
                       kMaxPagesToSweepPerSpace);
  });
}

bool Sweeper::AreSweeperTasksRunning() {
  return job_handle_ && job_handle_->IsValid() && job_handle_->IsActive();
}

V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
    Address free_start, Address free_end, Page* page, Space* space,
    FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  CHECK_GT(free_end, free_start);
  size_t freed_bytes = 0;
  size_t size = static_cast<size_t>(free_end - free_start);
  if (free_space_mode == ZAP_FREE_SPACE) {
    ZapCode(free_start, size);
  }
  ClearFreedMemoryMode clear_memory_mode =
      (free_list_mode == REBUILD_FREE_LIST)
          ? ClearFreedMemoryMode::kDontClearFreedMemory
          : ClearFreedMemoryMode::kClearFreedMemory;
  page->heap()->CreateFillerObjectAtBackground(
      free_start, static_cast<int>(size), clear_memory_mode);
  if (free_list_mode == REBUILD_FREE_LIST) {
    freed_bytes =
        reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
  }
  if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);

  return freed_bytes;
}

V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
    Address free_start, Address free_end, Page* page, bool record_free_ranges,
    TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
    InvalidatedSlotsCleanup* old_to_new_cleanup) {
  DCHECK_LE(free_start, free_end);
  if (sweeping_mode == SweepingMode::kEagerDuringGC) {
    // New space, and in consequence the old-to-new remembered set, is always
    // empty after a full GC, so we do not need to remove entries from it
    // after the full GC. However, we wouldn't even be allowed to do that,
    // since the main thread then owns the old-to-new remembered set. Removing
    // entries from it on a sweeper thread would race with the main thread.
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
                                           SlotSet::KEEP_EMPTY_BUCKETS);

    // While we only add old-to-old slots on live objects, we can still end up
    // with old-to-old slots in free memory with e.g. right-trimming of
    // objects.
    RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
                                           SlotSet::KEEP_EMPTY_BUCKETS);
  } else {
    DCHECK_NULL(page->slot_set<OLD_TO_OLD>());
  }

  // Old-to-shared isn't reset after a full GC, so it needs to be cleaned both
  // during and after a full GC.
  RememberedSet<OLD_TO_SHARED>::RemoveRange(page, free_start, free_end,
                                            SlotSet::KEEP_EMPTY_BUCKETS);

  if (record_free_ranges) {
    free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
        static_cast<uint32_t>(free_start - page->address()),
        static_cast<uint32_t>(free_end - page->address())));
  }

  old_to_new_cleanup->Free(free_start, free_end);
}

void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
    Page* page, const TypedSlotSet::FreeRangesMap& free_ranges_map,
    SweepingMode sweeping_mode) {
  if (sweeping_mode == SweepingMode::kEagerDuringGC) {
    page->ClearInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);

    // Typed old-to-old slot sets are only ever recorded in live code objects.
    // Also code objects are never right-trimmed, so there cannot be any slots
    // in a free range.
    page->AssertNoInvalidTypedSlots<OLD_TO_OLD>(free_ranges_map);

    page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
    return;
  }

  DCHECK_EQ(sweeping_mode, SweepingMode::kLazyOrConcurrent);

  // After a full GC there are no old-to-new typed slots. The main thread
  // could create new slots but not in a free range.
  page->AssertNoInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);
  DCHECK_NULL(page->typed_slot_set<OLD_TO_OLD>());
  page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
}

void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
    Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode) {
  marking_state_->bitmap(page)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(page, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = page->area_size() - live_bytes;
    page->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, page->allocated_bytes());
  }
}

int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                      FreeSpaceTreatmentMode free_space_mode,
                      SweepingMode sweeping_mode,
                      const base::MutexGuard& page_guard) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // Phase 1: Prepare the page for sweeping.

  // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
  // counter. The free operations below will decrease allocated_bytes_ to
  // actual live bytes and keep track of wasted_memory_.
  p->ResetAllocationStatistics();

  CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
  if (code_object_registry) code_object_registry->Clear();

  base::Optional<ActiveSystemPages> active_system_pages_after_sweeping;
  if (should_reduce_memory_) {
    // Only decrement counter when we discard unused system pages.
    active_system_pages_after_sweeping = ActiveSystemPages();
    active_system_pages_after_sweeping->Init(
        MemoryChunkLayout::kMemoryChunkHeaderSize,
        MemoryAllocator::GetCommitPageSizeBits(), Page::kPageSize);
  }

  // Phase 2: Free the non-live memory and clean up the regular remembered set
  // entries.

  // Liveness and freeing statistics.
  size_t live_bytes = 0;
  size_t max_freed_bytes = 0;

  bool record_free_ranges = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                            p->typed_slot_set<OLD_TO_OLD>() != nullptr ||
                            p->typed_slot_set<OLD_TO_SHARED>() != nullptr ||
                            DEBUG_BOOL;

  // Clean invalidated slots during the final atomic pause. After resuming
  // execution this isn't necessary because invalid old-to-new refs were
  // already removed by mark-compact's update-pointers phase.
  InvalidatedSlotsCleanup old_to_new_cleanup =
      InvalidatedSlotsCleanup::NoCleanup(p);
  if (sweeping_mode == SweepingMode::kEagerDuringGC)
    old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);

  // The free ranges map is used for filtering typed slots.
  TypedSlotSet::FreeRangesMap free_ranges_map;

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  p->object_start_bitmap()->Clear();
#endif

  // Iterate over the page using the live objects and free the memory before
  // the given live object.
  Address free_start = p->area_start();
  PtrComprCageBase cage_base(heap_->isolate());
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject const object = object_and_size.first;
    if (code_object_registry)
      code_object_registry->RegisterAlreadyExistingCodeObject(object.address());
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object.address();
    if (free_end != free_start) {
      max_freed_bytes =
          std::max(max_freed_bytes,
                   FreeAndProcessFreedMemory(free_start, free_end, p, space,
                                             free_list_mode, free_space_mode));
      CleanupRememberedSetEntriesForFreedMemory(
          free_start, free_end, p, record_free_ranges, &free_ranges_map,
          sweeping_mode, &old_to_new_cleanup);
    }
    Map map = object.map(cage_base, kAcquireLoad);
    // Map might be forwarded during GC.
    DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
    int size = object.SizeFromMap(map);
    live_bytes += size;
    free_start = free_end + size;

    if (active_system_pages_after_sweeping) {
      active_system_pages_after_sweeping->Add(
          free_end - p->address(), free_start - p->address(),
          MemoryAllocator::GetCommitPageSizeBits());
    }

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    p->object_start_bitmap()->SetBit(object.address());
#endif
  }

  // If there is free memory after the last live object also free that.
  Address free_end = p->area_end();
  if (free_end != free_start) {
    max_freed_bytes =
        std::max(max_freed_bytes,
                 FreeAndProcessFreedMemory(free_start, free_end, p, space,
                                           free_list_mode, free_space_mode));
    CleanupRememberedSetEntriesForFreedMemory(
        free_start, free_end, p, record_free_ranges, &free_ranges_map,
        sweeping_mode, &old_to_new_cleanup);
  }

  // Phase 3: Post-process the page.
  CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map, sweeping_mode);
  ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);

  if (active_system_pages_after_sweeping) {
    // Decrement accounted memory for discarded memory.
    PagedSpace* paged_space = static_cast<PagedSpace*>(p->owner());
    paged_space->ReduceActiveSystemPages(p,
                                         *active_system_pages_after_sweeping);
  }

  if (code_object_registry) code_object_registry->Finalize();
  p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;

  return static_cast<int>(
      p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}

size_t Sweeper::ConcurrentSweepingPageCount() {
  base::MutexGuard guard(&mutex_);
  return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
         sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
}

bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
                                   JobDelegate* delegate) {
  while (!delegate->ShouldYield()) {
    Page* page = GetSweepingPageSafe(identity);
    if (page == nullptr) return true;
    // Typed slot sets are only recorded on code pages. Code pages
    // are not swept concurrently to the application to ensure W^X.
    DCHECK_NULL((page->typed_slot_set<OLD_TO_NEW>()));
    DCHECK_NULL((page->typed_slot_set<OLD_TO_OLD>()));
    ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
  }
  return false;
}

bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
                 ThreadKind::kMain);
  const double start = heap_->MonotonicallyIncreasingTimeInMs();
  if (Page* page = GetSweepingPageSafe(identity)) {
    ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
  }
  const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
  heap_->tracer()->AddIncrementalSweepingStep(duration);
  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}

int Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                SweepingMode sweeping_mode,
                                int required_freed_bytes, int max_pages) {
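  // Sweeps pages of |identity| until either a swept page yields a free block
  // of at least |required_freed_bytes| or |max_pages| pages have been swept;
  // a value of zero disables the respective limit.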
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, identity, sweeping_mode);
    ++pages_freed;
    if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // Free list of a never-allocate page will be dropped later on.
      continue;
    }
    DCHECK_GE(freed, 0);
    max_freed = std::max(max_freed, freed);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
  }
  return max_freed;
}

int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
                               SweepingMode sweeping_mode) {
  DCHECK(IsValidSweepingSpace(identity));

  // The Scavenger may add already swept pages back.
  if (page->SweepingDone()) return 0;

  int max_freed = 0;
  {
    base::MutexGuard guard(page->mutex());
    DCHECK(!page->SweepingDone());
    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
              page->concurrent_sweeping_state());
    page->set_concurrent_sweeping_state(
        Page::ConcurrentSweepingState::kInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
                         sweeping_mode, guard);
    DCHECK(page->SweepingDone());
  }

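  // Publish the swept page and wake up any thread waiting on it in
  // EnsurePageIsSwept().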
  {
    base::MutexGuard guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
    cv_page_swept_.NotifyAll();
  }
  return max_freed;
}

void Sweeper::EnsurePageIsSwept(Page* page) {
  if (!sweeping_in_progress() || page->SweepingDone()) return;
  AllocationSpace space = page->owner_identity();

  if (IsValidSweepingSpace(space)) {
    if (TryRemoveSweepingPageSafe(space, page)) {
      // Page was successfully removed and can now be swept.
      ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
    } else {
      // Some sweeper task already took ownership of that page; wait until
      // sweeping is finished.
      base::MutexGuard guard(&mutex_);
      while (!page->SweepingDone()) {
        cv_page_swept_.Wait(&mutex_);
      }
    }
  } else {
    DCHECK(page->InNewSpace());
    EnsureIterabilityCompleted();
  }

  CHECK(page->SweepingDone());
}

bool Sweeper::TryRemoveSweepingPageSafe(AllocationSpace space, Page* page) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  SweepingList& sweeping_list = sweeping_list_[space_index];
  SweepingList::iterator position =
      std::find(sweeping_list.begin(), sweeping_list.end(), page);
  if (position == sweeping_list.end()) return false;
  sweeping_list.erase(position);
  return true;
}

void Sweeper::ScheduleIncrementalSweepingTask() {
  if (!incremental_sweeper_pending_) {
    incremental_sweeper_pending_ = true;
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
    auto taskrunner =
        V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
    taskrunner->PostTask(
        std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
  }
}

void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !job_handle_ || !job_handle_->IsValid());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
            page->concurrent_sweeping_state());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}

void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
#ifdef DEBUG
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());
  page->ForAllFreeListCategories([page](FreeListCategory* category) {
    DCHECK(!category->is_linked(page->owner()->free_list()));
  });
#endif  // DEBUG
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}

Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  Page* page = nullptr;
  if (!sweeping_list_[space_index].empty()) {
    page = sweeping_list_[space_index].back();
    sweeping_list_[space_index].pop_back();
  }
  return page;
}

void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
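    // If the task could not be aborted it is already running; wait for it to
    // finish and signal the semaphore before proceeding.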
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != TryAbortResult::kTaskAborted) {
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}

class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  ~IterabilityTask() override = default;

  IterabilityTask(const IterabilityTask&) = delete;
  IterabilityTask& operator=(const IterabilityTask&) = delete;

 private:
  void RunInternal() final {
    TRACE_GC_EPOCH(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING,
                   ThreadKind::kBackground);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;
};

void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
                                                  &iterability_task_semaphore_);
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}

void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());

  iterability_list_.push_back(page);
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
}

void Sweeper::MakeIterable(Page* page) {
  base::MutexGuard guard(page->mutex());
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  const FreeSpaceTreatmentMode free_space_mode =
      Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
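  // Sweep without rebuilding the free list; the goal here is only to make the
  // page iterable.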
  RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
           SweepingMode::kLazyOrConcurrent, guard);
}

}  // namespace internal
}  // namespace v8