// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/compactor.h"

#include <map>
#include <numeric>
#include <unordered_map>
#include <unordered_set>

#include "include/cppgc/macros.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"

namespace cppgc {
namespace internal {

namespace {
// Freelist size threshold that must be exceeded before compaction
// should be considered.
static constexpr size_t kFreeListSizeThreshold = 512 * kKB;

// The real worker behind heap compaction, recording references to movable
// objects ("slots"). When the objects end up being compacted and moved,
// Relocate() will adjust the slots to point to the new location of the
// object along with handling references for interior pointers.
//
// The MovableReferences object is created and maintained for the lifetime
// of one heap compaction-enhanced GC.
class MovableReferences final {
  using MovableReference = CompactionWorklists::MovableReference;

 public:
  explicit MovableReferences(HeapBase& heap) : heap_(heap) {}

  // Adds a slot for compaction. Filters slots in dead objects.
  void AddOrFilter(MovableReference*);

  // Relocates a backing store |from| -> |to|.
  void Relocate(Address from, Address to);

  // Relocates interior slots in a backing store that is moved |from| -> |to|.
  void RelocateInteriorReferences(Address from, Address to, size_t size);
  // Updates the collection of callbacks from the items pushed onto the
  // worklist by marking visitors.
  void UpdateCallbacks();

 private:
  HeapBase& heap_;

  // Map from a movable reference (value) to its slot. Upon moving an object,
  // the slot pointing to it requires updating. A value should currently have
  // only a single slot registered for it.
  std::unordered_map<MovableReference, MovableReference*> movable_references_;

  // Map of interior slots to their final location. Needs to be an ordered map
  // as it is used to walk through slots starting at a given memory address.
  // Requires log(n) lookup to make the early bailout reasonably fast.
  //
  // - The initial value for a given key is nullptr.
  // - Upon moving an object this value is adjusted accordingly.
  std::map<MovableReference*, Address> interior_movable_references_;

#if DEBUG
  // The following two collections are used to allow referring back from a
  // slot to an already moved object.
  std::unordered_set<const void*> moved_objects_;
  std::unordered_map<MovableReference*, MovableReference>
      interior_slot_to_object_;
#endif  // DEBUG
};

void MovableReferences::AddOrFilter(MovableReference* slot) {
  const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
  CHECK_NOT_NULL(slot_page);

  const void* value = *slot;
  if (!value) return;

  // All slots and values are part of Oilpan's heap.
  // - Slots may be contained within dead objects if e.g. the write barrier
  //   registered the slot while the backing itself has not been marked live
  //   in time. Slots in dead objects are filtered below.
  // - Values may only be contained in or point to live objects.

  const HeapObjectHeader& slot_header =
      slot_page->ObjectHeaderFromInnerAddress(slot);
  // Filter the slot since the object that contains the slot is dead.
  if (!slot_header.IsMarked()) return;

  const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
  CHECK_NOT_NULL(value_page);

  // The following cases are not compacted and do not require recording:
  // - Objects on large pages.
  // - Objects in non-compactable spaces.
  if (value_page->is_large() || !value_page->space()->is_compactable()) return;

  // Slots must reside in and values must point to live objects at this
  // point. |value| usually points to a separate object but can also point
  // to an interior pointer in the same object storage which is why the
  // dynamic header lookup is required.
  const HeapObjectHeader& value_header =
      value_page->ObjectHeaderFromInnerAddress(value);
  CHECK(value_header.IsMarked());

  // Slots may have been recorded already but must point to the same value.
  auto reference_it = movable_references_.find(value);
  if (V8_UNLIKELY(reference_it != movable_references_.end())) {
    CHECK_EQ(slot, reference_it->second);
    return;
  }

  // Add regular movable reference.
  movable_references_.emplace(value, slot);

  // Check whether the slot itself resides on a page that is compacted.
  if (V8_LIKELY(!slot_page->space()->is_compactable())) return;

  CHECK_EQ(interior_movable_references_.end(),
           interior_movable_references_.find(slot));
  interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
  interior_slot_to_object_.emplace(slot, slot_header.Payload());
#endif  // DEBUG
}

void MovableReferences::Relocate(Address from, Address to) {
#if DEBUG
  moved_objects_.insert(from);
#endif  // DEBUG

  // Interior slots always need to be processed for moved objects.
  // Consider an object A with slot A.x pointing to value B where A is
  // allocated on a movable page itself. When B is finally moved, it needs to
  // find the corresponding slot A.x. Object A may be moved already and the
  // memory may have been freed, which would result in a crash.
  if (!interior_movable_references_.empty()) {
    const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
    const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
    RelocateInteriorReferences(from, to, size);
  }

  auto it = movable_references_.find(from);
  // This means that there is no corresponding slot for a live object.
  // This may happen because a mutator may change the slot to point to a
  // different object because e.g. incremental marking marked an object
  // as live that was later on replaced.
  if (it == movable_references_.end()) {
    return;
  }

  // If the object is referenced by a slot that is contained on a compacted
  // area itself, check whether it can be updated already.
  MovableReference* slot = it->second;
  auto interior_it = interior_movable_references_.find(slot);
  if (interior_it != interior_movable_references_.end()) {
    MovableReference* slot_location =
        reinterpret_cast<MovableReference*>(interior_it->second);
    if (!slot_location) {
      interior_it->second = to;
#if DEBUG
      // Check that the containing object has not been moved yet.
      auto reverse_it = interior_slot_to_object_.find(slot);
      DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
      DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
#endif  // DEBUG
    } else {
      slot = slot_location;
    }
  }

  // Compaction is atomic, so the slot should not be updated during compaction.
  DCHECK_EQ(from, *slot);

  // Update the slot with the new value.
  *slot = to;
}

void MovableReferences::RelocateInteriorReferences(Address from, Address to,
                                                   size_t size) {
  // |from| is a valid address for a slot.
  auto interior_it = interior_movable_references_.lower_bound(
      reinterpret_cast<MovableReference*>(from));
  if (interior_it == interior_movable_references_.end()) return;
  DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);

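  // Walk all interior slots that fall within the moved object's payload
  // [from, from + size), recording the final address for each slot that has
  // not been relocated yet.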
  size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
  while (offset < size) {
    if (!interior_it->second) {
      // Update the interior reference value, so that when the object the slot
      // is pointing to is moved, it can re-use this value.
      Address reference = to + offset;
      interior_it->second = reference;

      // If the |slot|'s content is pointing into the region [from, from +
      // size) we are dealing with an interior pointer that does not point to
      // a valid HeapObjectHeader. Such references need to be fixed up
      // immediately.
      Address& reference_contents = *reinterpret_cast<Address*>(reference);
      if (reference_contents > from && reference_contents < (from + size)) {
        reference_contents = reference_contents - from + to;
      }
    }

    interior_it++;
    if (interior_it == interior_movable_references_.end()) return;
    offset = reinterpret_cast<Address>(interior_it->first) - from;
  }
}

class CompactionState final {
  CPPGC_STACK_ALLOCATED();
  using Pages = std::vector<NormalPage*>;

 public:
  CompactionState(NormalPageSpace* space,
                  MovableReferences& movable_references)
      : space_(space), movable_references_(movable_references) {}

  void AddPage(NormalPage* page) {
    DCHECK_EQ(space_, page->space());
    // If not the first page, add |page| onto the available pages chain.
    if (!current_page_)
      current_page_ = page;
    else
      available_pages_.push_back(page);
  }

  void RelocateObject(const NormalPage* page, const Address header,
                      size_t size) {
    // Allocate and copy over the live object.
    Address compact_frontier =
        current_page_->PayloadStart() + used_bytes_in_current_page_;
    if (compact_frontier + size > current_page_->PayloadEnd()) {
      // Can't fit on current page. Add remaining onto the freelist and
      // advance to next available page.
      ReturnCurrentPageToSpace();

      current_page_ = available_pages_.back();
      available_pages_.pop_back();
      used_bytes_in_current_page_ = 0;
      compact_frontier = current_page_->PayloadStart();
    }
    if (V8_LIKELY(compact_frontier != header)) {
      // Use a non-overlapping copy, if possible.
      if (current_page_ == page)
        memmove(compact_frontier, header, size);
      else
        memcpy(compact_frontier, header, size);
      movable_references_.Relocate(
          header + sizeof(HeapObjectHeader),
          compact_frontier + sizeof(HeapObjectHeader));
    }
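    // Record the new object start in the bitmap of the page being compacted
    // into; the bitmaps are rebuilt as part of compaction.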
    current_page_->object_start_bitmap().SetBit(compact_frontier);
    used_bytes_in_current_page_ += size;
    DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
  }

  void FinishCompactingSpace() {
    // If the current page hasn't been allocated into, add it to the available
    // list, for subsequent release below.
    if (used_bytes_in_current_page_ == 0) {
      available_pages_.push_back(current_page_);
    } else {
      ReturnCurrentPageToSpace();
    }

    // Return remaining available pages to the free page pool, decommitting
    // them from the pagefile.
    for (NormalPage* page : available_pages_) {
      SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
      NormalPage::Destroy(page);
    }
  }

  void FinishCompactingPage(NormalPage* page) {
#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
    defined(MEMORY_SANITIZER)
    // Zap the unused portion, until it is either compacted into or freed.
    if (current_page_ != page) {
      ZapMemory(page->PayloadStart(), page->PayloadSize());
    } else {
      ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
                page->PayloadSize() - used_bytes_in_current_page_);
    }
#endif
  }

 private:
  void ReturnCurrentPageToSpace() {
    DCHECK_EQ(space_, current_page_->space());
    space_->AddPage(current_page_);
    if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
      // Put the remainder of the page onto the free list.
      size_t freed_size =
          current_page_->PayloadSize() - used_bytes_in_current_page_;
      Address payload = current_page_->PayloadStart();
      Address free_start = payload + used_bytes_in_current_page_;
      SET_MEMORY_INACCESSIBLE(free_start, freed_size);
      space_->free_list().Add({free_start, freed_size});
      current_page_->object_start_bitmap().SetBit(free_start);
    }
  }

  NormalPageSpace* space_;
  MovableReferences& movable_references_;
  // Page into which compacted objects will be written.
  NormalPage* current_page_ = nullptr;
  // Offset into |current_page_| to the next free address.
  size_t used_bytes_in_current_page_ = 0;
  // Additional pages in the current space that can be used as compaction
  // targets. Pages that remain available at the end of compaction can be
  // released.
  Pages available_pages_;
};

void CompactPage(NormalPage* page, CompactionState& compaction_state) {
  compaction_state.AddPage(page);

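  // The object-start bitmap is cleared here and rebuilt as live objects are
  // relocated into their (possibly new) positions.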
  page->object_start_bitmap().Clear();

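  // Walk the page object by object: freelist entries are skipped, unmarked
  // (dead) objects are finalized, and marked objects are relocated.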
  for (Address header_address = page->PayloadStart();
       header_address < page->PayloadEnd();) {
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(header_address);
    size_t size = header->GetSize();
    DCHECK_GT(size, 0u);
    DCHECK_LT(size, kPageSize);

    if (header->IsFree()) {
      // Unpoison the freelist entry so that we can compact into it as needed.
      ASAN_UNPOISON_MEMORY_REGION(header_address, size);
      header_address += size;
      continue;
    }

    if (!header->IsMarked()) {
      // Compaction is currently launched only from AtomicPhaseEpilogue, so
      // it's guaranteed to be on the mutator thread - no need to postpone
      // finalization.
      header->Finalize();

      // As compaction is under way, leave the freed memory accessible
      // while compacting the rest of the page. We just zap the payload
      // to catch out other finalizers trying to access it.
#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
    defined(MEMORY_SANITIZER)
      ZapMemory(header, size);
#endif
      header_address += size;
      continue;
    }

    // Object is marked.
#if !defined(CPPGC_YOUNG_GENERATION)
    header->Unmark();
#endif
    compaction_state.RelocateObject(page, header_address, size);
    header_address += size;
  }

  compaction_state.FinishCompactingPage(page);
}


void CompactSpace(NormalPageSpace* space,
                  MovableReferences& movable_references) {
  using Pages = NormalPageSpace::Pages;

  DCHECK(space->is_compactable());

  space->free_list().Clear();

  // Compaction generally follows Jonker's algorithm for fast garbage
  // compaction. Compaction is performed in-place, sliding objects down over
  // unused holes for a smaller heap page footprint and improved locality. A
  // "compaction pointer" is consequently kept, pointing to the next available
  // address to move objects down to. It will belong to one of the already
  // compacted pages for this space, but as compaction proceeds, it will not
  // belong to the same page as the one being currently compacted.
  //
  // The compaction pointer is represented by the
  // |(current_page_, used_bytes_in_current_page_)| pair, with
  // |used_bytes_in_current_page_| being the offset into |current_page_| that
  // makes up the next available location. When compacting an arena page
  // exhausts the page currently being compacted into, page compaction
  // advances the compaction pointer's current page, as well as the allocation
  // point.
  //
  // By construction, the page compaction can be performed without having
  // to allocate any new pages. So to arrange for the page compaction's
  // supply of freed, available pages, we chain them together after each
  // has been "compacted from". The page compaction will then reuse those
  // as needed, and once finished, the chained, available pages can be
  // released back to the OS.
  //
  // To ease the passing of the compaction state when iterating over an
  // arena's pages, package it up into a |CompactionState|.

  Pages pages = space->RemoveAllPages();
  if (pages.empty()) return;

  CompactionState compaction_state(space, movable_references);
  for (BasePage* page : pages) {
    // Large objects do not belong to this arena.
    CompactPage(NormalPage::From(page), compaction_state);
  }

  compaction_state.FinishCompactingSpace();
  // Sweeping will verify the object start bitmap of the compacted space.
}

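// Returns the total free-list size across the given compactable spaces.
// ShouldCompact() compares this against kFreeListSizeThreshold to decide
// whether compaction is worthwhile.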
size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
  return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
                         [](size_t acc, const NormalPageSpace* space) {
                           DCHECK(space->is_compactable());
                           if (!space->size()) return acc;
                           return acc + space->free_list().Size();
                         });
}

}  // namespace

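// Collect the heap's compactable normal-page spaces up front; only these
// spaces are considered by the compactor.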
Compactor::Compactor(RawHeap& heap) : heap_(heap) {
  for (auto& space : heap_) {
    if (!space->is_compactable()) continue;
    DCHECK_EQ(&heap, space->raw_heap());
    compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
  }
}

bool Compactor::ShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
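  // Bail out if there is nothing to compact, or if an atomic GC may find heap
  // pointers on the stack: references discovered conservatively cannot be
  // adjusted when objects move.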
  if (compactable_spaces_.empty() ||
      (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
       stack_state ==
           GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
    // The following check ensures that tests that want to test compaction are
    // not interrupted by garbage collections that cannot use compaction.
    DCHECK(!enable_for_next_gc_for_testing_);
    return false;
  }

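  // Testing-only path: force compaction independently of the free-list
  // threshold.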
  if (enable_for_next_gc_for_testing_) {
    return true;
  }

  size_t free_list_size = UpdateHeapResidency(compactable_spaces_);

  return free_list_size > kFreeListSizeThreshold;
}

void Compactor::InitializeIfShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  DCHECK(!is_enabled_);

  if (!ShouldCompact(marking_type, stack_state)) return;

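  // Create the worklists used by marking visitors to record movable slots.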
  compaction_worklists_ = std::make_unique<CompactionWorklists>();

  is_enabled_ = true;
  enable_for_next_gc_for_testing_ = false;
}

bool Compactor::CancelIfShouldNotCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;

  DCHECK_NOT_NULL(compaction_worklists_);
  compaction_worklists_->movable_slots_worklist()->Clear();
  compaction_worklists_.reset();

  is_enabled_ = false;
  return true;
}

Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
  if (!is_enabled_) return CompactableSpaceHandling::kSweep;

  MovableReferences movable_references(*heap_.heap());

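  // Drain the movable slots recorded during marking; AddOrFilter() discards
  // slots that reside in dead objects.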
  CompactionWorklists::MovableReferencesWorklist::Local local(
      compaction_worklists_->movable_slots_worklist());
  CompactionWorklists::MovableReference* slot;
  while (local.Pop(&slot)) {
    movable_references.AddOrFilter(slot);
  }
  compaction_worklists_.reset();

  for (NormalPageSpace* space : compactable_spaces_) {
    CompactSpace(space, movable_references);
  }

  is_enabled_ = false;
  return CompactableSpaceHandling::kIgnore;
}

}  // namespace internal
}  // namespace cppgc