// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/compactor.h"

#include <map>
#include <numeric>
#include <unordered_map>
#include <unordered_set>

#include "include/cppgc/macros.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

namespace {
// Freelist size threshold that must be exceeded before compaction
// should be considered.
static constexpr size_t kFreeListSizeThreshold = 512 * kKB;
// The real worker behind heap compaction, recording references to movable
// objects ("slots"). When the objects end up being compacted and moved,
// Relocate() will adjust the slots to point to the new location of the
// object along with handling references for interior pointers.
//
// The MovableReferences object is created and maintained for the lifetime
// of one heap compaction-enhanced GC.
class MovableReferences final {
  using MovableReference = CompactionWorklists::MovableReference;

 public:
  explicit MovableReferences(HeapBase& heap) : heap_(heap) {}

  // Adds a slot for compaction. Filters slots in dead objects.
  void AddOrFilter(MovableReference*);

  // Relocates a backing store |from| -> |to|.
  void Relocate(Address from, Address to);

  // Relocates interior slots in a backing store that is moved |from| -> |to|.
  void RelocateInteriorReferences(Address from, Address to, size_t size);

  // Updates the collection of callbacks from the items pushed to the worklist
  // by marking visitors.
  void UpdateCallbacks();

 private:
  HeapBase& heap_;

  // Map from movable reference (value) to its slot. Upon moving an object,
  // the slot pointing to it requires updating. Currently, each movable object
  // should have only a single slot registered for it.
  std::unordered_map<MovableReference, MovableReference*> movable_references_;

  // Map of interior slots to their final location. Needs to be an ordered map
  // as it is used to walk through slots starting at a given memory address.
  // Requires log(n) lookup to make the early bailout reasonably fast.
  //
  // - The initial value for a given key is nullptr.
  // - Upon moving an object this value is adjusted accordingly.
  std::map<MovableReference*, Address> interior_movable_references_;

#if DEBUG
  // The following two collections are used to allow referring back from a
  // slot to an already moved object.
  std::unordered_set<const void*> moved_objects_;
  std::unordered_map<MovableReference*, MovableReference>
      interior_slot_to_object_;
#endif  // DEBUG
};

void MovableReferences::AddOrFilter(MovableReference* slot) {
  const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
  CHECK_NOT_NULL(slot_page);

  const void* value = *slot;
  if (!value) return;

  // All slots and values are part of Oilpan's heap.
  // - Slots may be contained within dead objects if e.g. the write barrier
  //   registered the slot while the backing itself has not been marked live
  //   in time. Slots in dead objects are filtered below.
  // - Values may only be contained in or point to live objects.

  const HeapObjectHeader& slot_header =
      slot_page->ObjectHeaderFromInnerAddress(slot);
  // Filter the slot since the object that contains the slot is dead.
  if (!slot_header.IsMarked()) return;

  const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
  CHECK_NOT_NULL(value_page);

  // The following cases are not compacted and do not require recording:
  // - Compactable object on large pages.
  // - Compactable object on non-compactable spaces.
  if (value_page->is_large() || !value_page->space().is_compactable()) return;

  // Slots must reside in and values must point to live objects at this
  // point. |value| usually points to a separate object but can also point
  // to an interior pointer in the same object storage which is why the
  // dynamic header lookup is required.
  const HeapObjectHeader& value_header =
      value_page->ObjectHeaderFromInnerAddress(value);
  CHECK(value_header.IsMarked());

  // Slots may have been recorded already but must point to the same value.
  auto reference_it = movable_references_.find(value);
  if (V8_UNLIKELY(reference_it != movable_references_.end())) {
    CHECK_EQ(slot, reference_it->second);
    return;
  }

  // Add regular movable reference.
  movable_references_.emplace(value, slot);

  // Check whether the slot itself resides on a page that is compacted.
  if (V8_LIKELY(!slot_page->space().is_compactable())) return;

  CHECK_EQ(interior_movable_references_.end(),
           interior_movable_references_.find(slot));
  interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
  interior_slot_to_object_.emplace(slot, slot_header.ObjectStart());
#endif  // DEBUG
}

void MovableReferences::Relocate(Address from, Address to) {
#if DEBUG
  moved_objects_.insert(from);
#endif  // DEBUG

  // Interior slots always need to be processed for moved objects.
  // Consider an object A with slot A.x pointing to value B where A is
  // allocated on a movable page itself. When B is finally moved, it needs to
  // find the corresponding slot A.x. Object A may be moved already and the
  // memory may have been freed, which would result in a crash.
  if (!interior_movable_references_.empty()) {
    const HeapObjectHeader& header = HeapObjectHeader::FromObject(to);
    const size_t size = header.ObjectSize();
    RelocateInteriorReferences(from, to, size);
  }

  auto it = movable_references_.find(from);
  // This means that there is no corresponding slot for a live object. It may
  // happen because a mutator changed the slot to point to a different object,
  // e.g. when incremental marking marked an object as live that was later
  // replaced.
  if (it == movable_references_.end()) {
    return;
  }

  // If the object is referenced by a slot that is contained on a compacted
  // area itself, check whether it can be updated already.
  MovableReference* slot = it->second;
  auto interior_it = interior_movable_references_.find(slot);
  if (interior_it != interior_movable_references_.end()) {
    MovableReference* slot_location =
        reinterpret_cast<MovableReference*>(interior_it->second);
    if (!slot_location) {
      interior_it->second = to;
#if DEBUG
      // Check that the containing object has not been moved yet.
      auto reverse_it = interior_slot_to_object_.find(slot);
      DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
      DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
#endif  // DEBUG
    } else {
      slot = slot_location;
    }
  }

  // Compaction is atomic, so the slot should not be updated during compaction.
  DCHECK_EQ(from, *slot);

  // Update the slot to the new value.
  *slot = to;
}

void MovableReferences::RelocateInteriorReferences(Address from, Address to,
                                                   size_t size) {
  // |from| is a valid address for a slot.
  auto interior_it = interior_movable_references_.lower_bound(
      reinterpret_cast<MovableReference*>(from));
  if (interior_it == interior_movable_references_.end()) return;
  DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);

  size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
  while (offset < size) {
    if (!interior_it->second) {
      // Update the interior reference value, so that when the object the slot
      // is pointing to is moved, it can re-use this value.
      Address reference = to + offset;
      interior_it->second = reference;

      // If the |slot|'s content is pointing into the region [from, from +
      // size) we are dealing with an interior pointer that does not point to
      // a valid HeapObjectHeader. Such references need to be fixed up
      // immediately.
      Address& reference_contents = *reinterpret_cast<Address*>(reference);
      if (reference_contents > from && reference_contents < (from + size)) {
        reference_contents = reference_contents - from + to;
      }
    }

    interior_it++;
    if (interior_it == interior_movable_references_.end()) return;
    offset = reinterpret_cast<Address>(interior_it->first) - from;
  }
}

class CompactionState final {
  CPPGC_STACK_ALLOCATED();
  using Pages = std::vector<NormalPage*>;

 public:
  CompactionState(NormalPageSpace* space,
                  MovableReferences& movable_references)
      : space_(space), movable_references_(movable_references) {}

  void AddPage(NormalPage* page) {
    DCHECK_EQ(space_, &page->space());
    // If not the first page, add |page| onto the available pages chain.
    if (!current_page_)
      current_page_ = page;
    else
      available_pages_.push_back(page);
  }

  void RelocateObject(const NormalPage* page, const Address header,
                      size_t size) {
    // Allocate and copy over the live object.
    Address compact_frontier =
        current_page_->PayloadStart() + used_bytes_in_current_page_;
    if (compact_frontier + size > current_page_->PayloadEnd()) {
      // Can't fit on current page. Add remaining onto the freelist and advance
      // to next available page.
      ReturnCurrentPageToSpace();

      current_page_ = available_pages_.back();
      available_pages_.pop_back();
      used_bytes_in_current_page_ = 0;
      compact_frontier = current_page_->PayloadStart();
    }
    if (V8_LIKELY(compact_frontier != header)) {
      // Use a non-overlapping copy, if possible.
      if (current_page_ == page)
        memmove(compact_frontier, header, size);
      else
        memcpy(compact_frontier, header, size);
      movable_references_.Relocate(header + sizeof(HeapObjectHeader),
                                   compact_frontier + sizeof(HeapObjectHeader));
    }
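    // Record the new object start in the bitmap and advance the compaction
    // frontier.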
    current_page_->object_start_bitmap().SetBit(compact_frontier);
    used_bytes_in_current_page_ += size;
    DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
  }

  void FinishCompactingSpace() {
    // If the current page hasn't been allocated into, add it to the available
    // list, for subsequent release below.
    if (used_bytes_in_current_page_ == 0) {
      available_pages_.push_back(current_page_);
    } else {
      ReturnCurrentPageToSpace();
    }

    // Return remaining available pages to the free page pool, decommitting
    // them from the pagefile.
    for (NormalPage* page : available_pages_) {
      SetMemoryInaccessible(page->PayloadStart(), page->PayloadSize());
      NormalPage::Destroy(page);
    }
  }

  void FinishCompactingPage(NormalPage* page) {
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
    // Zap the unused portion, until it is either compacted into or freed.
    if (current_page_ != page) {
      ZapMemory(page->PayloadStart(), page->PayloadSize());
    } else {
      ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
                page->PayloadSize() - used_bytes_in_current_page_);
    }
#endif
    page->object_start_bitmap().MarkAsFullyPopulated();
  }

 private:
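  // Hands |current_page_| back to the space and puts its unused tail onto the
  // free list.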
  void ReturnCurrentPageToSpace() {
    DCHECK_EQ(space_, &current_page_->space());
    space_->AddPage(current_page_);
    if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
      // Put the remainder of the page onto the free list.
      size_t freed_size =
          current_page_->PayloadSize() - used_bytes_in_current_page_;
      Address payload = current_page_->PayloadStart();
      Address free_start = payload + used_bytes_in_current_page_;
      SetMemoryInaccessible(free_start, freed_size);
      space_->free_list().Add({free_start, freed_size});
      current_page_->object_start_bitmap().SetBit(free_start);
    }
  }

  NormalPageSpace* space_;
  MovableReferences& movable_references_;
  // Page into which compacted objects are written.
  NormalPage* current_page_ = nullptr;
  // Offset into |current_page_| to the next free address.
  size_t used_bytes_in_current_page_ = 0;
  // Additional pages in the current space that can be used as compaction
  // targets. Pages that remain available at the end of compaction can be
  // released.
  Pages available_pages_;
};

void CompactPage(NormalPage* page, CompactionState& compaction_state) {
  compaction_state.AddPage(page);

  page->object_start_bitmap().Clear();

  for (Address header_address = page->PayloadStart();
       header_address < page->PayloadEnd();) {
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(header_address);
    size_t size = header->AllocatedSize();
    DCHECK_GT(size, 0u);
    DCHECK_LT(size, kPageSize);

    if (header->IsFree()) {
      // Unpoison the freelist entry so that we can compact into it as wanted.
      ASAN_UNPOISON_MEMORY_REGION(header_address, size);
      header_address += size;
      continue;
    }

    if (!header->IsMarked()) {
      // Compaction is currently launched only from AtomicPhaseEpilogue, so
      // it's guaranteed to be on the mutator thread - no need to postpone
      // finalization.
      header->Finalize();

      // As compaction is under way, leave the freed memory accessible
      // while compacting the rest of the page. We just zap the payload
      // to catch out other finalizers trying to access it.
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
      ZapMemory(header, size);
#endif
      header_address += size;
      continue;
    }

    // Object is marked.
#if !defined(CPPGC_YOUNG_GENERATION)
    header->Unmark();
#endif
    // Potentially unpoison the live object as well, as it is the source of
    // the copy.
    ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
    compaction_state.RelocateObject(page, header_address, size);
    header_address += size;
  }

  compaction_state.FinishCompactingPage(page);
}

void CompactSpace(NormalPageSpace* space,
                  MovableReferences& movable_references) {
  using Pages = NormalPageSpace::Pages;

#ifdef V8_USE_ADDRESS_SANITIZER
  UnmarkedObjectsPoisoner().Traverse(*space);
#endif  // V8_USE_ADDRESS_SANITIZER

  DCHECK(space->is_compactable());

  space->free_list().Clear();

  // Compaction generally follows Jonker's algorithm for fast garbage
  // compaction. Compaction is performed in-place, sliding objects down over
  // unused holes for a smaller heap page footprint and improved locality. A
  // "compaction pointer" is consequently kept, pointing to the next available
  // address to move objects down to. It will belong to one of the already
  // compacted pages for this space, but as compaction proceeds, it will not
  // belong to the same page as the one being currently compacted.
  //
  // The compaction pointer is represented by the
  // |(current_page_, used_bytes_in_current_page_)| pair, with
  // |used_bytes_in_current_page_| being the offset into |current_page_|,
  // making up the next available location. When the compaction of an arena
  // page causes the compaction pointer to exhaust the current page it is
  // compacting into, page compaction will advance the current page of the
  // compaction pointer, as well as the allocation point.
  //
  // By construction, the page compaction can be performed without having
  // to allocate any new pages. So to arrange for the page compaction's
  // supply of freed, available pages, we chain them together after each
  // has been "compacted from". The page compaction will then reuse those
  // as needed, and once finished, the chained, available pages can be
  // released back to the OS.
  //
  // To ease the passing of the compaction state when iterating over an
  // arena's pages, package it up into a |CompactionState|.

  Pages pages = space->RemoveAllPages();
  if (pages.empty()) return;

  CompactionState compaction_state(space, movable_references);
  for (BasePage* page : pages) {
    // Large objects do not belong to this arena.
    CompactPage(NormalPage::From(page), compaction_state);
  }

  compaction_state.FinishCompactingSpace();
  // Sweeping will verify the object start bitmap of the compacted space.
}

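// Returns the total free-list size across the given compactable spaces. The
// result is compared against |kFreeListSizeThreshold| in ShouldCompact() to
// decide whether compaction is worthwhile.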
size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
  return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
                         [](size_t acc, const NormalPageSpace* space) {
                           DCHECK(space->is_compactable());
                           if (!space->size()) return acc;
                           return acc + space->free_list().Size();
                         });
}

}  // namespace

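// Caches the set of compactable normal page spaces upfront so that later
// phases only need to iterate over this list.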
Compactor::Compactor(RawHeap& heap) : heap_(heap) {
  for (auto& space : heap_) {
    if (!space->is_compactable()) continue;
    DCHECK_EQ(&heap, space->raw_heap());
    compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
  }
}

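// Compaction is skipped when there are no compactable spaces or when an
// atomic GC may find heap pointers on the stack, as such references cannot be
// updated and would pin their targets. Otherwise it is triggered either by
// the testing flag or by the free-list size exceeding the threshold.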
bool Compactor::ShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) const {
  if (compactable_spaces_.empty() ||
      (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
       stack_state ==
           GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
    // The following check ensures that tests that want to test compaction are
    // not interrupted by garbage collections that cannot use compaction.
    DCHECK(!enable_for_next_gc_for_testing_);
    return false;
  }

  if (enable_for_next_gc_for_testing_) {
    return true;
  }

  size_t free_list_size = UpdateHeapResidency(compactable_spaces_);

  return free_list_size > kFreeListSizeThreshold;
}

void Compactor::InitializeIfShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  DCHECK(!is_enabled_);

  if (!ShouldCompact(marking_type, stack_state)) return;

  compaction_worklists_ = std::make_unique<CompactionWorklists>();

  is_enabled_ = true;
  is_cancelled_ = false;
}

bool Compactor::CancelIfShouldNotCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;

  is_cancelled_ = true;
  is_enabled_ = false;
  return true;
}

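// Drains the movable-slots worklist populated during marking into
// MovableReferences and then compacts each compactable space in turn.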
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
  if (is_cancelled_ && compaction_worklists_) {
    compaction_worklists_->movable_slots_worklist()->Clear();
    compaction_worklists_.reset();
  }
  if (!is_enabled_) return CompactableSpaceHandling::kSweep;

  StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
                                           StatsCollector::kAtomicCompact);

  MovableReferences movable_references(*heap_.heap());

  CompactionWorklists::MovableReferencesWorklist::Local local(
      compaction_worklists_->movable_slots_worklist());
  CompactionWorklists::MovableReference* slot;
  while (local.Pop(&slot)) {
    movable_references.AddOrFilter(slot);
  }
  compaction_worklists_.reset();

  for (NormalPageSpace* space : compactable_spaces_) {
    CompactSpace(space, movable_references);
  }

  enable_for_next_gc_for_testing_ = false;
  is_enabled_ = false;
  return CompactableSpaceHandling::kIgnore;
}

void Compactor::EnableForNextGCForTesting() {
  DCHECK_NULL(heap_.heap()->marker());
  enable_for_next_gc_for_testing_ = true;
}

}  // namespace internal
}  // namespace cppgc