// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sweeper.h"

#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects-inl.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

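// Pauses concurrent sweeper tasks for the lifetime of the scope. If the
// sweeper has no work left, sweeping is completed instead; otherwise sweeping
// stays enabled but cannot use background tasks until the scope is destroyed.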
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
    : sweeper_(sweeper) {
  sweeper_->stop_sweeper_tasks_ = true;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete, the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}

Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
  sweeper_->stop_sweeper_tasks_ = false;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}

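// Takes the OLD_SPACE sweeping list away from the sweeper for the lifetime of
// the scope so that the caller can filter its pages; the (possibly filtered)
// list is moved back on destruction. Requires sweeper tasks to be paused via
// a PauseOrCompleteScope.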
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
    : sweeper_(sweeper),
      pause_or_complete_scope_(pause_or_complete_scope),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  USE(pause_or_complete_scope_);
  if (!sweeping_in_progress_) return;

  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}

Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}

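// Background task that sweeps all sweeping spaces, starting with
// |space_to_start_| and round-robining through the remaining spaces. Code
// space is skipped since it is only swept on the main thread.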
class Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              std::atomic<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start),
        tracer_(isolate->heap()->tracer()) {}

  virtual ~SweeperTask() {}

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    DCHECK(IsValidSweepingSpace(space_to_start_));
    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      sweeper_->SweepSpaceFromTask(space_id);
    }
    (*num_sweeping_tasks_)--;
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  std::atomic<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

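// Foreground task that incrementally sweeps code space and reschedules itself
// until the code space sweeping list is drained.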
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  virtual ~IncrementalSweeperTask() {}

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");

    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};

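// Marks sweeping as in progress and sorts each space's sweeping list by
// ascending live bytes, so that pages with the least live data (and thus the
// most reclaimable memory) are swept first.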
void Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_);
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    int space_index = GetSweepSpaceIndex(space);
    std::sort(sweeping_list_[space_index].begin(),
              sweeping_list_[space_index].end(),
              [marking_state](Page* a, Page* b) {
                return marking_state->live_bytes(a) <
                       marking_state->live_bytes(b);
              });
  });
}

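// Spawns one concurrent SweeperTask per sweeping space (when concurrent
// sweeping is enabled) plus a foreground task that incrementally sweeps code
// space.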
void Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_);
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      DCHECK(IsValidSweepingSpace(space));
      num_sweeping_tasks_++;
      auto task = base::make_unique<SweeperTask>(
          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    });
    ScheduleIncrementalSweepingTask();
  }
}

void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, page->owner()->identity());
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}

Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

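// Tries to abort all pending sweeper tasks. Tasks that are already running
// cannot be aborted anymore; those are waited for via the semaphore.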
void Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        CancelableTaskManager::kTaskAborted) {
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_--;
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_);
}

void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}

bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }

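// Sweeps a single page: frees the memory between live objects (either back to
// the free list or as filler objects), removes recorded slots pointing into
// freed ranges, rebuilds the skip list for code pages, and clears the page's
// mark bits. Returns the largest freed block in bytes that is guaranteed to
// be allocatable, or 0 when the free list is ignored.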
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                      FreeSpaceTreatmentMode free_space_mode) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // TODO(ulan): we don't have to clear typed old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();
  DCHECK_EQ(0, free_start % (32 * kPointerSize));

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  const bool rebuild_skip_list =
      space->identity() == CODE_SPACE && p->skip_list() != nullptr;
  SkipList* skip_list = p->skip_list();
  if (rebuild_skip_list) {
    skip_list->Clear();
  }

  intptr_t live_bytes = 0;
  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  // Set the allocated_bytes counter to area_size. The free operations below
  // will decrease the counter to the actual live bytes.
  p->ResetAllocatedBytes();

  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        ZapCode(free_start, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
            free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(
            free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
            ClearFreedMemoryMode::kClearFreedMemory);
      }
      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    live_bytes += size;
    if (rebuild_skip_list) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kPointerSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      ZapCode(free_start, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
          free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo,
                                      ClearFreedMemoryMode::kClearFreedMemory);
    }

    RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }
  }

  // Clear invalid typed slots after collecting all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->RemoveInvaldSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->RemoveInvaldSlots(free_ranges);
    }
  }

  marking_state_->bitmap(p)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(p, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = p->area_size() - live_bytes;
    p->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, p->allocated_bytes());
  }
  p->set_concurrent_sweeping_state(Page::kSweepingDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;
  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}

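// Drains the sweeping list of |identity| from a background task until the
// list is empty or the sweeper is asked to stop.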
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
  Page* page = nullptr;
  while (!stop_sweeper_tasks_ &&
         ((page = GetSweepingPageSafe(identity)) != nullptr)) {
    ParallelSweepPage(page, identity);
  }
}

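// Sweeps at most one page of |identity| and returns whether the space's
// sweeping list is drained afterwards.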
bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
  if (Page* page = GetSweepingPageSafe(identity)) {
    ParallelSweepPage(page, identity);
  }
  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}

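// Sweeps pages of |identity| until a single freed block of at least
// |required_freed_bytes| was found or |max_pages| pages have been swept.
// Returns the largest freed block in bytes.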
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                int required_freed_bytes, int max_pages) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, identity);
    pages_freed += 1;
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
  }
  return max_freed;
}

int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
  // Early bailout for pages that are swept outside of the regular sweeping
  // path. Checking before taking the lock also avoids deadlocks.
  if (page->SweepingDone()) return 0;

  DCHECK(IsValidSweepingSpace(identity));
  int max_freed = 0;
  {
    base::LockGuard<base::Mutex> guard(page->mutex());
    // If this page was already swept in the meantime, we can return here.
    if (page->SweepingDone()) return 0;

    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
    page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    DCHECK(page->SweepingDone());

    // After finishing sweeping of a page we clean up its remembered set.
    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    if (typed_slot_set) {
      typed_slot_set->FreeToBeFreedChunks();
    }
    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    if (slot_set) {
      slot_set->FreeToBeFreedBuckets();
    }
  }

  {
    base::LockGuard<base::Mutex> guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
  }
  return max_freed;
}

void Sweeper::ScheduleIncrementalSweepingTask() {
  if (!incremental_sweeper_pending_) {
    incremental_sweeper_pending_ = true;
    IncrementalSweeperTask* task =
        new IncrementalSweeperTask(heap_->isolate(), this);
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
    V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
  }
}

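// Adds a page to the sweeping list of |space|. For REGULAR pages this also
// updates the space's allocated-bytes accounting; pages re-added after being
// temporarily removed skip the accounting, as it already happened.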
void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}

void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
  page->ForAllFreeListCategories(
      [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
  page->set_concurrent_sweeping_state(Page::kSweepingPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}

Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  Page* page = nullptr;
  if (!sweeping_list_[space_index].empty()) {
    page = sweeping_list_[space_index].front();
    sweeping_list_[space_index].pop_front();
  }
  return page;
}

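// Ensures that |page| can be iterated: pages in sweeping spaces are swept (or
// waited on), while pages in iterability spaces are processed through the
// iterability list.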
void Sweeper::EnsurePageIsIterable(Page* page) {
  AllocationSpace space = page->owner()->identity();
  if (IsValidSweepingSpace(space)) {
    SweepOrWaitUntilSweepingCompleted(page);
  } else {
    DCHECK(IsValidIterabilitySpace(space));
    EnsureIterabilityCompleted();
  }
}

void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}

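// Background task that makes all pages on the iterability list iterable.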
class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  virtual ~IterabilityTask() {}

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
};

void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = base::make_unique<IterabilityTask>(
        heap_->isolate(), this, &iterability_task_semaphore_);
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}

void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());

  iterability_list_.push_back(page);
  page->set_concurrent_sweeping_state(Page::kSweepingPending);
}

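// Sweeps |page| in IGNORE_FREE_LIST mode, turning free memory into filler
// objects so the page can be iterated while leaving free lists untouched.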
void Sweeper::MakeIterable(Page* page) {
  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
  const FreeSpaceTreatmentMode free_space_mode =
      Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
  RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
}

}  // namespace internal
}  // namespace v8