// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/weak-object-worklists.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

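// Marking state used by the concurrent marker tasks. Live-byte counts are
// accumulated per memory chunk in a task-local MemoryChunkDataMap and merged
// into the main marking state in ConcurrentMarking::FlushMemoryChunkData().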
class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentMarkingState(PtrComprCageBase cage_base,
                         MemoryChunkDataMap* memory_chunk_data)
      : MarkingStateBase(cage_base), memory_chunk_data_(memory_chunk_data) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*memory_chunk_data_)[chunk].live_bytes += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  MemoryChunkDataMap* memory_chunk_data_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  SlotSnapshot(const SlotSnapshot&) = delete;
  SlotSnapshot& operator=(const SlotSnapshot&) = delete;
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object value) {
    snapshot_[number_of_slots_++] = {slot, value};
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
};

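// Visitor used by the concurrent marker tasks. JSObjects are visited only up
// to their used instance size, because slack may be trimmed concurrently by
// the mutator. Strings whose maps can change concurrently (ConsString,
// SlicedString, ThinString) are visited via a slot snapshot that is taken
// before the object is marked black.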
class ConcurrentMarkingVisitor final
    : public MarkingVisitorBase<ConcurrentMarkingVisitor,
                                ConcurrentMarkingState> {
 public:
  ConcurrentMarkingVisitor(int task_id,
                           MarkingWorklists::Local* local_marking_worklists,
                           WeakObjects::Local* local_weak_objects, Heap* heap,
                           unsigned mark_compact_epoch,
                           base::EnumSet<CodeFlushMode> code_flush_mode,
                           bool embedder_tracing_enabled,
                           bool should_keep_ages_unchanged,
                           MemoryChunkDataMap* memory_chunk_data)
      : MarkingVisitorBase(local_marking_worklists, local_weak_objects, heap,
                           mark_compact_epoch, code_flush_mode,
                           embedder_tracing_enabled,
                           should_keep_ages_unchanged),
        marking_state_(heap->isolate(), memory_chunk_data),
        memory_chunk_data_(memory_chunk_data) {}

  template <typename T>
  static V8_INLINE T Cast(HeapObject object) {
    return T::cast(object);
  }

  // HeapVisitor overrides to implement the snapshotting protocol.

  bool AllowDefaultJSObjectVisit() { return false; }

  int VisitJSObject(Map map, JSObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject object) {
    return VisitJSObjectSubclassFast(map, object);
  }

  int VisitJSExternalObject(Map map, JSExternalObject object) {
    return VisitJSObjectSubclass(map, object);
  }

#if V8_ENABLE_WEBASSEMBLY
  int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
    return VisitJSObjectSubclass(map, object);
  }
  int VisitWasmSuspenderObject(Map map, WasmSuspenderObject object) {
    return VisitJSObjectSubclass(map, object);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  int VisitJSWeakCollection(Map map, JSWeakCollection object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSFinalizationRegistry(Map map, JSFinalizationRegistry object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitConsString(Map map, ConsString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitThinString(Map map, ThinString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        local_marking_worklists_->Push(value);
        return true;
      }

    } else if (marking_state_.IsWhite(value)) {
      local_weak_objects_->next_ephemerons_local.Push(Ephemeron{key, value});
    }
    return false;
  }

  // HeapVisitor override.
  bool ShouldVisit(HeapObject object) {
    return marking_state_.GreyToBlack(object);
  }

  bool ShouldVisitUnaccounted(HeapObject object) {
    return marking_state_.GreyToBlackUnaccounted(object);
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot,
                                     PtrComprCageBase cage_base,
                                     PtrComprCageBase code_cage_base)
        : ObjectVisitorWithCageBases(cage_base, code_cage_base),
          slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object object = p.Relaxed_Load(cage_base());
        slot_snapshot_->add(p, object);
      }
    }

    void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
      CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
      Object code = slot.Relaxed_Load(code_cage_base());
      slot_snapshot_->add(ObjectSlot(slot.address()), code);
    }

    void VisitPointers(HeapObject host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for
      // objects which contain weak references.
      UNREACHABLE();
    }

    void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // some String subclasses.
      UNREACHABLE();
    }

    void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // some String subclasses.
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                                 ObjectSlot end) override {
      // This should never happen, because snapshotting is performed only on
      // some String subclasses.
      UNREACHABLE();
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclassFast(Map map, T object) {
    using TBodyDescriptor = typename T::FastBodyDescriptor;
    return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
  }

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object) {
    if (!ShouldVisit(object)) return 0;
    int size = TBodyDescriptor::SizeOf(map, object);
    int used_size = map.UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
    this->VisitMapPointer(object);
    // It is important to visit only the used fields and ignore the slack
    // fields because the slack fields may be trimmed concurrently.
    TBodyDescriptor::IterateBody(map, object, used_size, this);
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object length = object.unchecked_length(kAcquireLoad);
    // No accounting here to avoid re-reading the length which could already
    // contain a non-SMI value when left-trimming happens concurrently.
    if (!ShouldVisitUnaccounted(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length.IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
                                      size);
    VisitMapPointer(object);
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object.IsHeapObject()) continue;
      HeapObject heap_object = HeapObject::cast(object);
      concrete_visitor()->SynchronizePageAccess(heap_object);
      BasicMemoryChunk* target_page =
          BasicMemoryChunk::FromHeapObject(heap_object);
      if (!is_shared_heap_ && target_page->InSharedHeap()) continue;
      MarkObject(host, heap_object);
      RecordSlot(host, slot, heap_object);
    }
  }

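  // Visits the object by first taking a snapshot of all in-object slots and
  // only then transitioning the object grey->black, so the visit works on a
  // consistent view of the object even if the main thread transitions the
  // string (e.g. to a ThinString) concurrently.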
  template <typename T>
  int VisitFullyWithSnapshot(Map map, T object) {
    using TBodyDescriptor = typename T::BodyDescriptor;
    int size = TBodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot =
        MakeSlotSnapshot<T, TBodyDescriptor>(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T, typename TBodyDescriptor>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_, cage_base(),
                                    code_cage_base());
    visitor.VisitPointer(object, object.map_slot());
    TBodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
    MarkCompactCollector::RecordSlot(object, slot, target);
  }

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
    if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target))
      return;

    MarkCompactCollector::RecordRelocSlotInfo info =
        MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);

    MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
    if (!data.typed_slots) {
      data.typed_slots.reset(new TypedSlots());
    }
    data.typed_slots->Insert(info.slot_type, info.offset);
  }

  void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
    // This is needed because TSAN does not process the memory fence
    // emitted after page initialization.
    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
  }

  ConcurrentMarkingState* marking_state() { return &marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return TraceRetainingPathMode::kDisabled;
  }

  ConcurrentMarkingState marking_state_;
  MemoryChunkDataMap* memory_chunk_data_;
  SlotSnapshot slot_snapshot_;

  friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
                                  ConcurrentMarkingState>;
};

// Strings can change maps due to conversion to thin string or external strings.
// Use unchecked cast to avoid data race in slow dchecks.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return FixedArray::unchecked_cast(object);
}

// The Deserializer changes the map from StrongDescriptorArray to
// DescriptorArray.
template <>
StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return StrongDescriptorArray::unchecked_cast(DescriptorArray::cast(object));
}

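// v8::JobTask that runs concurrent marking. Run() is invoked both on
// background workers and on the joining thread; GetMaxConcurrency() lets the
// platform scale the number of workers with the amount of pending marking
// work.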
class ConcurrentMarking::JobTask : public v8::JobTask {
 public:
  JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
          base::EnumSet<CodeFlushMode> code_flush_mode,
          bool should_keep_ages_unchanged)
      : concurrent_marking_(concurrent_marking),
        mark_compact_epoch_(mark_compact_epoch),
        code_flush_mode_(code_flush_mode),
        should_keep_ages_unchanged_(should_keep_ages_unchanged) {}

  ~JobTask() override = default;
  JobTask(const JobTask&) = delete;
  JobTask& operator=(const JobTask&) = delete;

  // v8::JobTask overrides.
  void Run(JobDelegate* delegate) override {
    if (delegate->IsJoiningThread()) {
      // TRACE_GC is not needed here because the caller opens the right scope.
      concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
                               should_keep_ages_unchanged_);
    } else {
      TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
                     GCTracer::Scope::MC_BACKGROUND_MARKING,
                     ThreadKind::kBackground);
      concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
                               should_keep_ages_unchanged_);
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    return concurrent_marking_->GetMaxConcurrency(worker_count);
  }

 private:
  ConcurrentMarking* concurrent_marking_;
  const unsigned mark_compact_epoch_;
  base::EnumSet<CodeFlushMode> code_flush_mode_;
  const bool should_keep_ages_unchanged_;
};

ConcurrentMarking::ConcurrentMarking(Heap* heap,
                                     MarkingWorklists* marking_worklists,
                                     WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      weak_objects_(weak_objects) {
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
  // Concurrent marking requires atomic object field writes.
  CHECK(!FLAG_concurrent_marking);
#endif
}

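// Drains the marking worklists on a worker thread. Work is processed in
// batches of at most kBytesUntilInterruptCheck bytes or
// kObjectsUntilInterruptCheck objects between checks of
// delegate->ShouldYield(). Objects in the new-space linear allocation area and
// the pending new large object are pushed onto the on-hold worklist instead of
// being visited, since they may still be under construction by the mutator.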
void ConcurrentMarking::Run(JobDelegate* delegate,
                            base::EnumSet<CodeFlushMode> code_flush_mode,
                            unsigned mark_compact_epoch,
                            bool should_keep_ages_unchanged) {
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  uint8_t task_id = delegate->GetTaskId() + 1;
  TaskState* task_state = &task_state_[task_id];
  auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
  MarkingWorklists::Local local_marking_worklists(
      marking_worklists_, cpp_heap
                              ? cpp_heap->CreateCppMarkingState()
                              : MarkingWorklists::Local::kNoCppMarkingState);
  WeakObjects::Local local_weak_objects(weak_objects_);
  ConcurrentMarkingVisitor visitor(
      task_id, &local_marking_worklists, &local_weak_objects, heap_,
      mark_compact_epoch, code_flush_mode,
      heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
      &task_state->memory_chunk_data);
  NativeContextInferrer& native_context_inferrer =
      task_state->native_context_inferrer;
  NativeContextStats& native_context_stats = task_state->native_context_stats;
  double time_ms;
  size_t marked_bytes = 0;
  Isolate* isolate = heap_->isolate();
  if (FLAG_trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
                                task_id);
  }
  bool another_ephemeron_iteration = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;
      while (local_weak_objects.current_ephemerons_local.Pop(&ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          another_ephemeron_iteration = true;
        }
      }
    }
    bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject object;
        if (!local_marking_worklists.Pop(&object)) {
          done = true;
          break;
        }
        objects_processed++;

        Address new_space_top = kNullAddress;
        Address new_space_limit = kNullAddress;
        Address new_large_object = kNullAddress;

        if (heap_->new_space()) {
          // The order of the two loads is important.
          new_space_top = heap_->new_space()->original_top_acquire();
          new_space_limit = heap_->new_space()->original_limit_relaxed();
        }

        if (heap_->new_lo_space()) {
          new_large_object = heap_->new_lo_space()->pending_object();
        }

        Address addr = object.address();

        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          local_marking_worklists.PushOnHold(object);
        } else {
          Map map = object.map(isolate, kAcquireLoad);
          if (is_per_context_mode) {
            Address context;
            if (native_context_inferrer.Infer(isolate, map, object, &context)) {
              local_marking_worklists.SwitchToContext(context);
            }
          }
          size_t visited_size = visitor.Visit(map, object);
          if (is_per_context_mode) {
            native_context_stats.IncrementSize(
                local_marking_worklists.Context(), map, object, visited_size);
          }
          current_marked_bytes += visited_size;
        }
      }
      if (objects_processed > 0) another_ephemeron_iteration = true;
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (delegate->ShouldYield()) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;
      while (local_weak_objects.discovered_ephemerons_local.Pop(&ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          another_ephemeron_iteration = true;
        }
      }
    }

    local_marking_worklists.Publish();
    local_weak_objects.Publish();
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (another_ephemeron_iteration) {
      set_another_ephemeron_iteration(true);
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

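// The desired concurrency is capped at kMaxTasks and otherwise grows with the
// number of pending marking items and ephemerons, so additional workers are
// only requested while there is enough work for them.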
size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
  size_t marking_items = marking_worklists_->shared()->Size();
  for (auto& worklist : marking_worklists_->context_worklists())
    marking_items += worklist.worklist->Size();
  return std::min<size_t>(
      kMaxTasks,
      worker_count +
          std::max<size_t>({marking_items,
                            weak_objects_->discovered_ephemerons.Size(),
                            weak_objects_->current_ephemerons.Size()}));
}

void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  DCHECK(!job_handle_ || !job_handle_->IsValid());

  job_handle_ = V8::GetCurrentPlatform()->PostJob(
      priority, std::make_unique<JobTask>(
                    this, heap_->mark_compact_collector()->epoch(),
                    heap_->mark_compact_collector()->code_flush_mode(),
                    heap_->ShouldCurrentGCKeepAgesUnchanged()));
  DCHECK(job_handle_->IsValid());
}

void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;

  if (marking_worklists_->shared()->IsEmpty() &&
      weak_objects_->current_ephemerons.IsEmpty() &&
      weak_objects_->discovered_ephemerons.IsEmpty()) {
    return;
  }
  if (!job_handle_ || !job_handle_->IsValid()) {
    ScheduleJob(priority);
  } else {
    if (priority != TaskPriority::kUserVisible)
      job_handle_->UpdatePriority(priority);
    job_handle_->NotifyConcurrencyIncrease();
  }
}

void ConcurrentMarking::Join() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return;
  job_handle_->Join();
}

bool ConcurrentMarking::Pause() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return false;

  job_handle_->Cancel();
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  return !job_handle_ || !job_handle_->IsValid();
}

void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    main_stats->Merge(task_state_[i].native_context_stats);
    task_state_[i].native_context_stats.Clear();
  }
}

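// Merges the per-task live-byte counts and typed-slot recordings into the
// given marking state and the OLD_TO_OLD remembered set. Must only be called
// while no marking job is active (see the DCHECK below).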
void ConcurrentMarking::FlushMemoryChunkData(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
    for (auto& pair : memory_chunk_data) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might already be unmapped.
      MemoryChunk* memory_chunk = pair.first;
      MemoryChunkData& data = pair.second;
      if (data.live_bytes) {
        marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
      }
      if (data.typed_slots) {
        RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                              std::move(data.typed_slots));
      }
    }
    memory_chunk_data.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    auto it = task_state_[i].memory_chunk_data.find(chunk);
    if (it != task_state_[i].memory_chunk_data.end()) {
      it->second.live_bytes = 0;
      it->second.typed_slots.reset();
    }
  }
}

size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= kMaxTasks; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
}

}  // namespace internal
}  // namespace v8